Merge "sched: fix compilation error with !CONFIG_SMP" into msm-4.8
diff --git a/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt b/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt
new file mode 100644
index 0000000..6ddc725
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt
@@ -0,0 +1,148 @@
+Attached MDM Modem Devices
+
+External modems are devices that are attached to the msm and controlled by gpios.
+There is also a data channel between the msm and the external modem that sometimes needs
+to be reset.
+
+Required Properties:
+- compatible:	The bus devices need to be compatible with
+		"qcom,mdm2-modem", "qcom,ext-mdm9x25", "qcom,ext-mdm9x35", "qcom, ext-mdm9x45",
+		"qcom,ext-mdm9x55".
+
+Required named gpio properties:
+- qcom,mdm2ap-errfatal-gpio: gpio used by the external modem to indicate an error fatal
+		condition on the modem to the apps processor.
+- qcom,ap2mdm-errfatal-gpio: gpio used by the apps processor to indicate an error fatal
+		condition on the apps processor to the external modem.
+- qcom,mdm2ap-status-gpio: gpio to indicate to the apps processor when there is a watchdog
+		bite on the external modem.
+- qcom,ap2mdm-status-gpio: gpio for the apps processor to indicate to the modem that an apps
+		processor watchdog bite has occurred.
+- qcom,ap2mdm-soft-reset-gpio: gpio for the apps processor to use to soft-reset the external
+		modem. If the flags parameter has a value of 0x1 then the gpio is active LOW.
+
+Required Interrupts:
+- "err_fatal_irq": Interrupt generated on the apps processor when the error fatal gpio is pulled
+		high by the external modem.
+- "status_irq": Interrupt generated on the apps processor when the mdm2ap-status gpio falls low
+		on the external modem. This usually indicates a watchdog bite on the modem.
+- "plbrdy_irq": Interrupt generated on the aps processor when the mdm2ap-pblrdy gpio is pulled
+		either high or low by the external modem. This is an indication that the modem
+		has rebooted.
+- "mdm2ap_vddmin_irq": Interrupt generated on the apps processor when the external modem goes
+		into vddmin power state.
+
+Optional named gpio properties:
+- qcom,mdm2ap-pblrdy-gpio: gpio used by some external modems to indicate when the modem has
+		booted into the PBL bootloader.
+- qcom,ap2mdm-wakeup-gpio: gpio used by the apps processor to wake the external modem
+		out of a low power state.
+- qcom,ap2mdm-chnl-rdy-gpio: gpio used by the apps processor to inform the external modem
+		that the data link is ready.
+- qcom,mdm2ap-wakeup-gpio: gpio from the external modem to the apps processor to wake it
+		out of a low power state.
+- qcom,ap2mdm-vddmin-gpio: gpio to indicate to the external modem when the apps processor
+		is about to enter vddmin power state.
+- qcom,mdm2ap-vddmin-gpio: gpio used by the external modem to inform the apps processor
+		when it is about to enter vddmin power state.
+- qcom,ap2mdm-kpdpwr-gpio: gpio used to simulate a power button press on the external
+		modem. Some modems use this as part of their initial power-up sequence.
+		If the "flags" parameter has a value of 0x1 then it is active LOW.
+- qcom,ap2mdm-pmic-pwr-en-gpio: Some modems need this gpio for the apps processor to enable
+		the pmic on the external modem.
+- qcom,use-usb-port-gpio: some modems use this gpio to switch a port connection from uart to usb.
+		This is used during firmware upgrade of some modems.
+- qcom,mdm-link-detect-gpio: some modems may support two interfaces. This gpio
+		indicates whether only one or both links can be used.
+
+Optional driver parameters:
+- qcom,ramdump-delay-ms: time in milliseconds to wait before starting to collect ramdumps.
+		This interval is the time to wait after an error on the external modem is
+		signaled to the apps processor before starting to collect ramdumps. Its
+		value depends on the type of external modem (e.g. MDM vs QSC), and how
+		error fatal handling is done on the modem.
+		The default value is 2 seconds (2000 milliseconds) as specified by the
+		mdm9x15 software developer. Consultation with the developer of the modem
+		software is required to determine this value for that modem.
+- qcom,ps-hold-delay-ms: minimum delay in milliseconds between consecutive PS_HOLD toggles.
+		SGLTE targets that use a QSC1215 modem require a minimum delay between consecutive
+		toggling of the PS_HOLD pmic input. For one target it is 500 milliseconds but it
+		may vary depending on the target and how the external modem is connected. The value
+		is specified by the hardware designers.
+- qcom,early-power-on: boolean flag to indicate whether to power on the modem when the device is probed.
+- qcom,sfr-query: boolean flag to indicate whether to query the modem for a reset reason.
+- qcom,no-powerdown-after-ramdumps: boolean flag to indicate whether to power down the modem after ramdumps.
+- qcom,no-a2m-errfatal-on-ssr: boolean to tell the driver not to raise ap2mdm errfatal during SSR.
+- qcom,no-reset-on-first-powerup: boolean to tell the driver not to reset the modem when first
+		powering it up.
+- qcom,ramdump-timeout-ms: ramdump timeout interval in milliseconds.
+		This interval is the time to wait for collection of the external modem's ramdump
+		to complete. Its value depends on the speed of the data connection between the
+		external modem and the apps processor on the platform. If the connection is a
+		UART port then this delay needs to be longer in order to avoid premature timeout
+		of the ramdump collection.
+		The default value is 2 minutes (120000 milliseconds) which is based on the
+		measured time it takes over a UART connection. It is reduced when the data
+		connection is an HSIC port. The value is usually tuned empirically for a
+		particular target.
+- qcom,image-upgrade-supported: boolean flag to indicate if software upgrade is supported.
+- qcom,support-shutdown: boolean flag to indicate if graceful shutdown is supported.
+- qcom,vddmin-drive-strength: drive strength in milliamps of the ap2mdm-vddmin gpio.
+		The ap2mdm_vddmin gpio is controlled by the RPM processor. It is pulled low
+		to indicate to the external modem that the apps processor has entered vddmin
+		state, and high to indicate the reverse. Its parameters are passed to the RPM
+		software from the HLOS because the RPM software has no way of saving this type
+		of configuration when an external modem is attached.
+		The value of the drive strength is specified by the hardware designers. A value
+		of 8 milliamps is typical.
+		This property is ignored if the property "qcom,ap2mdm-vddmin-gpio" is
+		not set.
+- qcom,vddmin-modes: a string indicating the "modes" requested for the ap2mdm-vddmin gpio.
+		This value is passed to RPM and is used by the RPM module to determine the
+		gpio mux function. The only mode string currently supported is
+		"normal", which corresponds to the value 0x03 that is passed to RPM.
+- qcom,restart-group: List of subsystems that will need to restart together.
+- qcom,mdm-dual-link: Boolean to indicate whether both links can be used for
+		communication.
+- qcom,ssctl-instance-id: Instance id used by the subsystem to connect with the SSCTL service.
+- qcom,sysmon-id: platform device id that sysmon is probed with for the subsystem.
+- qcom,pil-force-shutdown: Boolean. If set, the SSR framework will not trigger graceful shutdown
+			   on behalf of the subsystem driver.
+
+Example:
+	mdm0: qcom,mdm0 {
+		compatible = "qcom,mdm2-modem";
+		cell-index = <0>;
+		#address-cells = <0>;
+		interrupt-parent = <&mdm0>;
+		interrupts = <0 1 2 3>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0xffffffff>;
+		interrupt-map =
+			<0 &msmgpio 82 0x3
+			1 &msmgpio 46 0x3
+			2 &msmgpio 80 0x3
+			3 &msmgpio 27 0x3>;
+		interrupt-names =
+			"err_fatal_irq",
+			"status_irq",
+			"plbrdy_irq",
+			"mdm2ap_vddmin_irq";
+
+		qcom,mdm2ap-errfatal-gpio = <&msmgpio 82 0x00>;
+		qcom,ap2mdm-errfatal-gpio = <&msmgpio 106 0x00>;
+		qcom,mdm2ap-status-gpio   = <&msmgpio 46 0x00>;
+		qcom,ap2mdm-status-gpio   = <&msmgpio 105 0x00>;
+		qcom,ap2mdm-soft-reset-gpio = <&msmgpio 24 0x00>;
+		qcom,mdm2ap-pblrdy-gpio = <&msmgpio 80 0x00>;
+		qcom,ap2mdm-wakeup-gpio = <&msmgpio 104 0x00>;
+		qcom,ap2mdm-vddmin-gpio = <&msmgpio 108 0x00>;
+		qcom,mdm2ap-vddmin-gpio = <&msmgpio 27 0x00>;
+
+		qcom,ramdump-delay-ms = <2000>;
+		qcom,ramdump-timeout-ms = <120000>;
+		qcom,vddmin-modes = "normal";
+		qcom,vddmin-drive-strength = <8>;
+		qcom,ssctl-instance-id = <10>;
+		qcom,sysmon-id = <20>;
+	};
diff --git a/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.txt
similarity index 85%
rename from Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt
rename to Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.txt
index fbbacd9..6f28969 100644
--- a/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt
+++ b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.txt
@@ -6,7 +6,7 @@
 
 Required properties:
 
-- compatible : should be "aspeed,ast2400-bt-bmc"
+- compatible : should be "aspeed,ast2400-ibt-bmc"
 - reg: physical address and size of the registers
 
 Optional properties:
@@ -17,7 +17,7 @@
 Example:
 
 	ibt@1e789140 {
-		compatible = "aspeed,ast2400-bt-bmc";
+		compatible = "aspeed,ast2400-ibt-bmc";
 		reg = <0x1e789140 0x18>;
 		interrupts = <8>;
 	};
diff --git a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
new file mode 100644
index 0000000..bd78623
--- /dev/null
+++ b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
@@ -0,0 +1,127 @@
+Qualcomm Technologies Inc MSS QDSP6v5 Peripheral Image Loader
+
+pil-qdsp6v5-mss is a peripheral image loader (PIL) driver. It is used for
+loading QDSP6v5 (Hexagon) firmware images for modem subsystems into memory and
+preparing the subsystem's processor to execute code. It's also responsible for
+shutting down the processor when it's not needed.
+
+Required properties:
+- compatible:	      Must be "qcom,pil-q6v5-mss" or "qcom,pil-q6v55-mss" or
+			"qcom,pil-q6v56-mss".
+- reg:		      Pairs of physical base addresses and region sizes of
+		      memory mapped registers.
+- reg-names:	      Names of the bases for the above registers. "qdsp6_base",
+		      "rmb_base", and "restart_reg" or "restart_reg_sec" (optional,
+		      for secure mode) are expected.
+		      If the halt registers share the same 4K page, "halt_base"
+		      is defined; otherwise "halt_q6", "halt_modem" and
+		      "halt_nc" are required.
+- interrupts:         The modem watchdog interrupt
+- vdd_cx-supply:      Reference to the regulator that supplies the vdd_cx domain.
+- vdd_cx-voltage:     Voltage corner/level (max) for the cx rail.
+- vdd_mx-supply:      Reference to the regulator that supplies the memory rail.
+- vdd_mx-uV:          Voltage setting for the mx rail.
+- qcom,firmware-name: Base name of the firmware image. Ex. "mdsp"
+
+Optional properties:
+- vdd_mss-supply:     Reference to the regulator that supplies the processor.
+		      This may be a shared regulator that is already voted
+		      on in the PIL proxy voting code (and also managed by the
+		      modem on its own), hence it is marked as optional.
+- vdd_pll-supply:     Reference to the regulator that supplies the PLL's rail.
+- qcom,vdd_pll:       Voltage to be set for the PLL's rail.
+- reg-names:          "cxrail_bhs_reg" - control register for modem power
+		      domain.
+- clocks:	      Array of <clock_controller_phandle clock_reference> listing
+		      all the clocks that are accessed by this subsystem.
+- qcom,proxy-clock-names:  Names of the clocks that need to be turned on/off during
+			   proxy voting/unvoting.
+- qcom,active-clock-names: Names of the clocks that need to be turned on for the
+			   subsystem to run. Turned off when the subsystem is shut down.
+- clock-names:		   Names of all the clocks that are accessed by the subsystem.
+- qcom,is-not-loadable: Boolean- Present if the image does not need to
+			be loaded.
+- qcom,pil-self-auth: Boolean- True if authentication is required.
+- qcom,mem-protect-id: Virtual ID used by PIL to call into TZ/HYP to protect/unprotect
+			subsystem related memory.
+- qcom,gpio-err-fatal: GPIO used by the modem to indicate error fatal to the apps.
+- qcom,gpio-err-ready: GPIO used by the modem to indicate error ready to the apps.
+- qcom,gpio-proxy-unvote: GPIO used by the modem to trigger proxy unvoting in
+  the apps.
+- qcom,gpio-force-stop: GPIO used by the apps to force the modem to shutdown.
+- qcom,gpio-stop-ack: GPIO used by the modem to ack force stop or a graceful stop
+		      to the apps.
+- qcom,gpio-ramdump-disable: GPIO used by the modem to inform the apps that ramdump
+			     collection should be disabled.
+- qcom,gpio-shutdown-ack: GPIO used by the modem to indicate that it has done the
+			  necessary cleanup and that the apps can move forward with
+			  the shutdown sequence.
+- qcom,restart-group: List of subsystems that will need to restart together.
+- qcom,mba-image-is-not-elf:	Boolean- Present if MBA image doesn't use the ELF
+				format.
+- qcom,ssctl-instance-id: Instance id used by the subsystem to connect with the SSCTL
+			  service.
+- qcom,sysmon-id:	platform device id that sysmon is probed with for the subsystem.
+- qcom,override-acc: Boolean- Present if we need to override the default ACC settings
+- qcom,ahb-clk-vote: Boolean- Present if we need to remove the vote for the mss_cfg_ahb
+		     clock after the modem boots up
+- qcom,pnoc-clk-vote: Boolean- Present if the modem needs the PNOC bus to be
+		      clocked before it boots up
+- qcom,qdsp6v56-1-3: Boolean- Present if the qdsp version is v56 1.3
+- qcom,qdsp6v56-1-5: Boolean- Present if the qdsp version is v56 1.5
+- qcom,edge:		GLINK logical name of the remote subsystem
+- qcom,pil-force-shutdown: Boolean. If set, the SSR framework will not trigger graceful shutdown
+                           on behalf of the subsystem driver.
+- qcom,pil-mss-memsetup: Boolean - True if TZ needs to be informed of the modem start address and size.
+- qcom,pas-id:	      pas_id of the subsystem.
+- qcom,qdsp6v56-1-8: Boolean- Present if the qdsp version is v56 1.8
+- qcom,qdsp6v56-1-8-inrush-current: Boolean- Present if the qdsp version is v56 1.8 and has an
+				    in-rush current issue.
+- qcom,qdsp6v61-1-1: Boolean- Present if the qdsp version is v61 1.1
+- qcom,qdsp6v62-1-2: Boolean- Present if the qdsp version is v62 1.2
+- qcom,qdsp6v62-1-5: Boolean- Present if the qdsp version is v62 1.5
+- qcom,mx-spike-wa: Boolean- Present if we need to assert QDSP6 I/O clamp, memory
+		    wordline clamp, and compiler memory clamp during MSS restart.
+- qcom,qdsp6v56-1-10: Boolean- Present if the qdsp version is v56 1.10
+- qcom,override-acc-1: Override the default ACC settings with this value if present.
+
+Example:
+	qcom,mss@fc880000 {
+		compatible = "qcom,pil-q6v5-mss";
+		reg = <0xfc880000 0x100>,
+		      <0xfd485000 0x400>,
+		      <0xfc820000 0x020>,
+		      <0xfc401680 0x004>;
+		reg-names = "qdsp6_base", "halt_base", "rmb_base",
+			    "restart_reg";
+		interrupts = <0 24 1>;
+		vdd_mss-supply = <&pm8841_s3>;
+		vdd_cx-supply = <&pm8841_s2>;
+		vdd_cx-voltage = <7>;
+		vdd_mx-supply = <&pm8841_s1>;
+		vdd_mx-uV = <105000>;
+
+		clocks = <&clock_rpm clk_xo_pil_mss_clk>,
+			 <&clock_gcc clk_gcc_mss_cfg_ahb_clk>,
+			 <&clock_gcc clk_gcc_mss_q6_bimc_axi_clk>,
+			 <&clock_gcc clk_gcc_boot_rom_ahb_clk>;
+		clock-names = "xo", "iface_clk", "bus_clk", "mem_clk";
+		qcom,proxy-clock-names = "xo";
+		qcom,active-clock-names = "iface_clk", "bus_clk", "mem_clk";
+
+		qcom,is-not-loadable;
+		qcom,firmware-name = "mba";
+		qcom,pil-self-auth;
+		qcom,mba-image-is-not-elf;
+		qcom,override-acc;
+
+		/* GPIO inputs from mss */
+		qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_1_in 0 0>;
+		qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_1_in 1 0>;
+		qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_1_in 2 0>;
+
+		/* GPIO output to mss */
+		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
+		qcom,ssctl-instance-id = <12>;
+		qcom,sysmon-id = <0>;
+	};
diff --git a/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt b/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt
new file mode 100644
index 0000000..d7edafc
--- /dev/null
+++ b/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt
@@ -0,0 +1,135 @@
+* Generic Subsystem Peripheral Image Loader
+
+subsys-pil-tz is a generic peripheral image loader (PIL) driver. It is
+used for loading the firmware images of the subsystems into memory and
+preparing the subsystem's processor to execute code. It's also responsible
+for shutting down the processor when it's not needed.
+
+Required properties:
+- compatible:	      Must be "qcom,pil-tz-generic"
+- qcom,firmware-name: Base name of the firmware image.
+
+Optional properties:
+- reg:		      Pairs of physical base addresses and region sizes of
+		      memory mapped registers.
+- reg-names:	      Names of the bases for the above registers. Not required for
+		      PIL usage. Ex. "wrapper_base", "vbif_base".
+- interrupts:	      Subsystem to Apps watchdog bite interrupt.
+- vdd_'reg'-supply: Reference to the regulator that supplies the corresponding
+		    'reg' domain.
+- qcom,proxy-reg-names: Names of the regulators that need to be turned on/off
+			during proxy voting/unvoting.
+- qcom,active-reg-names: Names of the regulators that need to be turned on for the
+			subsystem to run. Turned off when the subsystem is shut down.
+- qcom,vdd_'reg'-uV-uA:    Voltage and current values for the 'reg' regulator.
+- qcom,proxy-clock-names:  Names of the clocks that need to be turned on/off during
+			   proxy voting/unvoting.
+- qcom,active-clock-names: Names of the clocks that need to be turned on for the
+			   subsystem to run. Turned off when the subsystem is shut down.
+- clock-names:	      Names of all the clocks that are accessed by the subsystem.
+- qcom,<clock-name>-freq: Frequency to be set for that clock in Hz. If the property
+			  isn't added for a clock, then the default clock frequency
+			  would be set to 19200000 Hz.
+- qcom,msm-bus,name:  Name of the bus client for the subsystem.
+- qcom,msm-bus,num-cases: Number of use-cases.
+- qcom,msm-bus,num-paths: Number of paths.
+- qcom,msm-bus,active-only: If not set, uses the dual context by default.
+- qcom,msm-bus,vectors-KBps: Vector array of master id, slave id, arbitrated
+			     bandwidth and instantaneous bandwidth.
+- qcom,pas-id:	      pas_id of the subsystem.
+- qcom,proxy-timeout-ms: Proxy vote timeout value for the subsystem.
+- qcom,smem-id:	      ID of the SMEM item for the subsystem.
+- qcom,is-not-loadable: Boolean. Present if the subsystem's firmware image does not
+			need to be loaded.
+- qcom,pil-no-auth: Boolean. Present if the subsystem is not authenticated and brought
+		    out of reset by using the PIL ops.
+- qcom,mem-protect-id: Virtual ID used by PIL to call into TZ/HYP to protect/unprotect
+			subsystem related memory.
+- qcom,gpio-err-fatal: GPIO used by the subsystem to indicate error fatal to the apps.
+- qcom,gpio-err-ready: GPIO used by the subsystem to indicate error ready to the apps.
+- qcom,gpio-proxy-unvote: GPIO used by the subsystem to trigger proxy unvoting in
+			  the apps.
+- qcom,gpio-force-stop: GPIO used by the apps to force the subsystem to shutdown.
+- qcom,gpio-stop-ack: GPIO used by the subsystem to ack force stop or a graceful stop
+		      to the apps.
+- qcom,restart-group: List of subsystems that will need to restart together.
+- qcom,keep-proxy-regs-on: Boolean. Present if during proxy unvoting, PIL needs to leave
+			the regulators enabled after removing the voltage/current votes.
+- qcom,edge:		GLINK logical name of the remote subsystem
+- qcom,ssctl-instance-id: Instance id used by the subsystem to connect with the SSCTL
+			  service.
+- qcom,sysmon-id:	platform device id that sysmon is probed with for the subsystem.
+- qcom,pil-force-shutdown: Boolean. If set, the SSR framework will not trigger graceful shutdown
+                           on behalf of the subsystem driver.
+- qcom,pil-generic-irq-handler: generic interrupt handler used for communication with the
+				subsystem based on bit values in scsr registers.
+- qcom,spss-scsr-bits: array of bit positions into the scsr registers used in the generic handler.
+- qcom,complete-ramdump: Boolean. If set, a complete ramdump (i.e. the region from the start
+			address of the first segment to the end address of the last segment)
+			will be collected without leaving any holes in between.
+
+Example:
+	qcom,venus@fdce0000 {
+		compatible = "qcom,pil-tz-generic";
+		reg = <0xfdce0000 0x4000>,
+		      <0xfdc80000 0x400>;
+
+		vdd-supply = <&gdsc_venus>;
+		qcom,proxy-reg-names = "vdd";
+		clock-names = "core_clk", "iface_clk", "bus_clk", "mem_clk",
+				"scm_core_clk", "scm_iface_clk", "scm_bus_clk",
+				"scm_core_clk_src";
+		qcom,proxy-clock-names = "core_clk", "iface_clk", "bus_clk",
+					"mem_clk", "scm_core_clk",
+					"scm_iface_clk", "scm_bus_clk",
+					"scm_core_clk_src";
+		qcom,scm_core_clk_src-freq = <50000000>;
+
+		qcom,msm-bus,name = "pil-venus";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,active-only = <0>;
+		qcom,msm-bus,vectors-KBps =
+				<63 512 0 0>,
+				<63 512 0 304000>;
+
+		qcom,pas-id = <9>;
+		qcom,proxy-timeout-ms = <2000>;
+		qcom,firmware-name = "venus";
+	};
+
+	qcom,lpass@fe200000 {
+		compatible = "qcom,pil-tz-generic";
+		reg = <0xfe200000 0x00100>,
+		      <0xfd485100 0x00010>,
+		      <0xfc4016c0 0x00004>;
+
+		interrupts = <0 162 1>;
+
+		vdd_cx-supply = <&pm8841_s2_corner>;
+		qcom,proxy-reg-names = "vdd_cx";
+		qcom,vdd_cx-uV-uA = <7 100000>;
+		clock-names = "bus_clk", "xo", "scm_core_clk", "scm_iface_clk",
+				"scm_bus_clk", "scm_core_clk_src";
+		qcom,active-clock-names = "bus_clk";
+		qcom,proxy-clock-names = "xo", "scm_core_clk", "scm_iface_clk",
+					"scm_bus_clk", "scm_core_clk_src";
+		qcom,scm_core_clk_src-freq = <50000000>;
+
+		qcom,smem-id = <423>;
+		qcom,pas-id = <1>;
+		qcom,proxy-timeout-ms = <10000>;
+		qcom,firmware-name = "adsp";
+		qcom,edge = "lpass";
+
+		/* GPIO inputs from lpass */
+		qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>;
+		qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_2_in 2 0>;
+		qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_2_in 1 0>;
+		qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_2_in 3 0>;
+
+		/* GPIO output to lpass */
+		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
+		qcom,ssctl-instance-id = <14>;
+		qcom,sysmon-id = <1>;
+	};
diff --git a/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt b/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt
index fd40c85..462b04e8 100644
--- a/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt
+++ b/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt
@@ -12,7 +12,7 @@
 
 Optional properties:
 - ti,dmic: phandle for the OMAP dmic node if the machine have it connected
-- ti,jack_detection: Need to be present if the board capable to detect jack
+- ti,jack-detection: Need to be present if the board capable to detect jack
   insertion, removal.
 
 Available audio endpoints for the audio-routing table:
diff --git a/Documentation/i2c/i2c-topology b/Documentation/i2c/i2c-topology
index e0aefee..1a014fe 100644
--- a/Documentation/i2c/i2c-topology
+++ b/Documentation/i2c/i2c-topology
@@ -326,7 +326,7 @@
 
 This is a good topology.
 
-                                   .--------.
+                                    .--------.
                    .----------.  .--| dev D1 |
                    |  parent- |--'  '--------'
                 .--|  locked  |     .--------.
@@ -350,7 +350,7 @@
 
 This is a good topology.
 
-                                   .--------.
+                                    .--------.
                    .----------.  .--| dev D1 |
                    |   mux-   |--'  '--------'
                 .--|  locked  |     .--------.
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt
index 6d6c07c..63912ef3 100644
--- a/Documentation/networking/dsa/dsa.txt
+++ b/Documentation/networking/dsa/dsa.txt
@@ -67,13 +67,14 @@
 Switch tagging protocols
 ------------------------
 
-DSA currently supports 4 different tagging protocols, and a tag-less mode as
+DSA currently supports 5 different tagging protocols, and a tag-less mode as
 well. The different protocols are implemented in:
 
 net/dsa/tag_trailer.c: Marvell's 4 trailer tag mode (legacy)
 net/dsa/tag_dsa.c: Marvell's original DSA tag
 net/dsa/tag_edsa.c: Marvell's enhanced DSA tag
 net/dsa/tag_brcm.c: Broadcom's 4 bytes tag
+net/dsa/tag_qca.c: Qualcomm's 2 bytes tag
 
 The exact format of the tag protocol is vendor specific, but in general, they
 all contain something which:
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 739db9a..6bbceb9 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -777,6 +777,17 @@
 conjunction with KVM_SET_CLOCK, it is used to ensure monotonicity on scenarios
 such as migration.
 
+When KVM_CAP_ADJUST_CLOCK is passed to KVM_CHECK_EXTENSION, it returns the
+set of bits that KVM can return in struct kvm_clock_data's flags member.
+
+The only flag defined now is KVM_CLOCK_TSC_STABLE.  If set, the returned
+value is the exact kvmclock value seen by all VCPUs at the instant
+when KVM_GET_CLOCK was called.  If clear, the returned value is simply
+CLOCK_MONOTONIC plus a constant offset; the offset can be modified
+with KVM_SET_CLOCK.  KVM will try to make all VCPUs follow this clock,
+but the exact value read by each VCPU could differ, because the host
+TSC is not stable.
+
 struct kvm_clock_data {
 	__u64 clock;  /* kvmclock current value */
 	__u32 flags;
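
For illustration, a minimal userspace sketch of the interface described above
(the fds kvm_fd and vm_fd, the helper name, and the error handling are
illustrative assumptions, not part of this patch):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Returns 1 if the reported clock was TSC-stable, 0 if not, -1 on error. */
	static int read_kvm_clock(int kvm_fd, int vm_fd, struct kvm_clock_data *data)
	{
		/* Ask which flags KVM_GET_CLOCK may return on this host. */
		int caps = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ADJUST_CLOCK);

		if (caps < 0 || ioctl(vm_fd, KVM_GET_CLOCK, data) < 0)
			return -1;

		/*
		 * With KVM_CLOCK_TSC_STABLE set, data->clock is the exact
		 * kvmclock value seen by all VCPUs at the time of the call;
		 * otherwise it is CLOCK_MONOTONIC plus a constant offset.
		 */
		return (data->flags & KVM_CLOCK_TSC_STABLE) ? 1 : 0;
	}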
diff --git a/MAINTAINERS b/MAINTAINERS
index 851b89b..ad9b965 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7084,6 +7084,7 @@
 LED SUBSYSTEM
 M:	Richard Purdie <rpurdie@rpsys.net>
 M:	Jacek Anaszewski <j.anaszewski@samsung.com>
+M:	Pavel Machek <pavel@ucw.cz>
 L:	linux-leds@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git
 S:	Maintained
@@ -8057,6 +8058,7 @@
 F:	include/linux/mlx4/
 
 MELLANOX MLX5 core VPI driver
+M:	Saeed Mahameed <saeedm@mellanox.com>
 M:	Matan Barak <matanb@mellanox.com>
 M:	Leon Romanovsky <leonro@mellanox.com>
 L:	netdev@vger.kernel.org
diff --git a/Makefile b/Makefile
index 9074181..c9dfa38 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
@@ -403,11 +403,12 @@
 		   -fno-strict-aliasing -fno-common \
 		   -Werror-implicit-function-declaration \
 		   -Wno-format-security \
-		   -std=gnu89
+		   -std=gnu89 $(call cc-option,-fno-PIE)
+
 
 KBUILD_AFLAGS_KERNEL :=
 KBUILD_CFLAGS_KERNEL :=
-KBUILD_AFLAGS   := -D__ASSEMBLY__
+KBUILD_AFLAGS   := -D__ASSEMBLY__ $(call cc-option,-fno-PIE)
 KBUILD_AFLAGS_MODULE  := -DMODULE
 KBUILD_CFLAGS_MODULE  := -DMODULE
 KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
diff --git a/arch/arm/boot/dts/imx53-qsb.dts b/arch/arm/boot/dts/imx53-qsb.dts
index dec4b07..3799396 100644
--- a/arch/arm/boot/dts/imx53-qsb.dts
+++ b/arch/arm/boot/dts/imx53-qsb.dts
@@ -64,8 +64,8 @@
 			};
 
 			ldo3_reg: ldo3 {
-				regulator-min-microvolt = <600000>;
-				regulator-max-microvolt = <1800000>;
+				regulator-min-microvolt = <1725000>;
+				regulator-max-microvolt = <3300000>;
 				regulator-always-on;
 			};
 
@@ -76,8 +76,8 @@
 			};
 
 			ldo5_reg: ldo5 {
-				regulator-min-microvolt = <1725000>;
-				regulator-max-microvolt = <3300000>;
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3600000>;
 				regulator-always-on;
 			};
 
@@ -100,14 +100,14 @@
 			};
 
 			ldo9_reg: ldo9 {
-				regulator-min-microvolt = <1200000>;
+				regulator-min-microvolt = <1250000>;
 				regulator-max-microvolt = <3600000>;
 				regulator-always-on;
 			};
 
 			ldo10_reg: ldo10 {
-				regulator-min-microvolt = <1250000>;
-				regulator-max-microvolt = <3650000>;
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3600000>;
 				regulator-always-on;
 			};
 		};
diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
index 0ff1c2d..26cce4d 100644
--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
@@ -13,6 +13,11 @@
 		};
 	};
 
+	memory@80000000 {
+		device_type = "memory";
+		reg = <0x80000000 0>;
+	};
+
 	wl12xx_vmmc: wl12xx_vmmc {
 		compatible = "regulator-fixed";
 		regulator-name = "vwl1271";
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
index 731ec37..8f9a69c 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
@@ -13,9 +13,9 @@
 		};
 	};
 
-	memory@0 {
+	memory@80000000 {
 		device_type = "memory";
-		reg = <0 0>;
+		reg = <0x80000000 0>;
 	};
 
 	leds {
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
index 6365635..4caadb2 100644
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
@@ -124,6 +124,7 @@
 		compatible = "ti,abe-twl6040";
 		ti,model = "omap5-uevm";
 
+		ti,jack-detection;
 		ti,mclk-freq = <19200000>;
 
 		ti,mcpdm = <&mcpdm>;
@@ -415,7 +416,7 @@
 			ti,backup-battery-charge-high-current;
 		};
 
-		gpadc {
+		gpadc: gpadc {
 			compatible = "ti,palmas-gpadc";
 			interrupts = <18 0
 				      16 0
@@ -475,8 +476,8 @@
 				smps6_reg: smps6 {
 					/* VDD_DDR3 - over VDD_SMPS6 */
 					regulator-name = "smps6";
-					regulator-min-microvolt = <1200000>;
-					regulator-max-microvolt = <1200000>;
+					regulator-min-microvolt = <1350000>;
+					regulator-max-microvolt = <1350000>;
 					regulator-always-on;
 					regulator-boot-on;
 				};
diff --git a/arch/arm/boot/dts/stih410-b2260.dts b/arch/arm/boot/dts/stih410-b2260.dts
index ef2ff2f..7fb507f 100644
--- a/arch/arm/boot/dts/stih410-b2260.dts
+++ b/arch/arm/boot/dts/stih410-b2260.dts
@@ -74,7 +74,7 @@
 		/* Low speed expansion connector */
 		spi0: spi@9844000 {
 			label = "LS-SPI0";
-			cs-gpio = <&pio30 3 0>;
+			cs-gpios = <&pio30 3 0>;
 			status = "okay";
 		};
 
diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
index 48fc24f..300a1bd 100644
--- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
@@ -282,11 +282,15 @@
 			uart1_pins_a: uart1@0 {
 				allwinner,pins = "PG6", "PG7";
 				allwinner,function = "uart1";
+				allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 			};
 
 			uart1_pins_cts_rts_a: uart1-cts-rts@0 {
 				allwinner,pins = "PG8", "PG9";
 				allwinner,function = "uart1";
+				allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 			};
 
 			mmc0_pins_a: mmc0@0 {
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index bc69838..9688ec0 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -74,6 +74,26 @@ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long
 		dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
 }
 
+void dump_backtrace_stm(u32 *stack, u32 instruction)
+{
+	char str[80], *p;
+	unsigned int x;
+	int reg;
+
+	for (reg = 10, x = 0, p = str; reg >= 0; reg--) {
+		if (instruction & BIT(reg)) {
+			p += sprintf(p, " r%d:%08x", reg, *stack--);
+			if (++x == 6) {
+				x = 0;
+				p = str;
+				printk("%s\n", str);
+			}
+		}
+	}
+	if (p != str)
+		printk("%s\n", str);
+}
+
 #ifndef CONFIG_ARM_UNWIND
 /*
  * Stack pointers should always be within the kernels view of
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index 7fa487e..37b2a11 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -3,6 +3,9 @@
  * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
  */
 
+/* No __ro_after_init data in the .rodata section - which will always be ro */
+#define RO_AFTER_INIT_DATA
+
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/cache.h>
 #include <asm/thread_info.h>
@@ -223,6 +226,8 @@
 		. = ALIGN(PAGE_SIZE);
 		__init_end = .;
 
+		*(.data..ro_after_init)
+
 		NOSAVE_DATA
 		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
 		READ_MOSTLY_DATA(L1_CACHE_BYTES)
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S
index fab5a50..7d7952e 100644
--- a/arch/arm/lib/backtrace.S
+++ b/arch/arm/lib/backtrace.S
@@ -10,6 +10,7 @@
  * 27/03/03 Ian Molton Clean up CONFIG_CPU
  *
  */
+#include <linux/kern_levels.h>
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 		.text
@@ -83,13 +84,13 @@
 		teq	r3, r1, lsr #11
 		ldreq	r0, [frame, #-8]	@ get sp
 		subeq	r0, r0, #4		@ point at the last arg
-		bleq	.Ldumpstm		@ dump saved registers
+		bleq	dump_backtrace_stm	@ dump saved registers
 
 1004:		ldr	r1, [sv_pc, #0]		@ if stmfd sp!, {..., fp, ip, lr, pc}
 		ldr	r3, .Ldsi		@ instruction exists,
 		teq	r3, r1, lsr #11
 		subeq	r0, frame, #16
-		bleq	.Ldumpstm		@ dump saved registers
+		bleq	dump_backtrace_stm	@ dump saved registers
 
 		teq	sv_fp, #0		@ zero saved fp means
 		beq	no_frame		@ no further frames
@@ -112,38 +113,6 @@
 		.long	1004b, 1006b
 		.popsection
 
-#define instr r4
-#define reg   r5
-#define stack r6
-
-.Ldumpstm:	stmfd	sp!, {instr, reg, stack, r7, lr}
-		mov	stack, r0
-		mov	instr, r1
-		mov	reg, #10
-		mov	r7, #0
-1:		mov	r3, #1
- ARM(		tst	instr, r3, lsl reg	)
- THUMB(		lsl	r3, reg			)
- THUMB(		tst	instr, r3		)
-		beq	2f
-		add	r7, r7, #1
-		teq	r7, #6
-		moveq	r7, #0
-		adr	r3, .Lcr
-		addne	r3, r3, #1		@ skip newline
-		ldr	r2, [stack], #-4
-		mov	r1, reg
-		adr	r0, .Lfp
-		bl	printk
-2:		subs	reg, reg, #1
-		bpl	1b
-		teq	r7, #0
-		adrne	r0, .Lcr
-		blne	printk
-		ldmfd	sp!, {instr, reg, stack, r7, pc}
-
-.Lfp:		.asciz	" r%d:%08x%s"
-.Lcr:		.asciz	"\n"
 .Lbad:		.asciz	"Backtrace aborted due to bad frame pointer <%p>\n"
 		.align
 .Ldsi:		.word	0xe92dd800 >> 11	@ stmfd sp!, {... fp, ip, lr, pc}
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index a9afeeb..0465338 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -71,6 +71,7 @@
 	select HAVE_ARM_TWD
 	select ARM_ERRATA_754322
 	select ARM_ERRATA_775420
+	select OMAP_INTERCONNECT
 
 config SOC_DRA7XX
 	bool "TI DRA7XX"
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 2abd53a..cc6d9fa 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -205,11 +205,15 @@ void __init omap2xxx_check_revision(void)
 
 #define OMAP3_SHOW_FEATURE(feat)		\
 	if (omap3_has_ ##feat())		\
-		printk(#feat" ");
+		n += scnprintf(buf + n, sizeof(buf) - n, #feat " ");
 
 static void __init omap3_cpuinfo(void)
 {
 	const char *cpu_name;
+	char buf[64];
+	int n = 0;
+
+	memset(buf, 0, sizeof(buf));
 
 	/*
 	 * OMAP3430 and OMAP3530 are assumed to be same.
@@ -241,10 +245,10 @@ static void __init omap3_cpuinfo(void)
 		cpu_name = "OMAP3503";
 	}
 
-	sprintf(soc_name, "%s", cpu_name);
+	scnprintf(soc_name, sizeof(soc_name), "%s", cpu_name);
 
 	/* Print verbose information */
-	pr_info("%s %s (", soc_name, soc_rev);
+	n += scnprintf(buf, sizeof(buf) - n, "%s %s (", soc_name, soc_rev);
 
 	OMAP3_SHOW_FEATURE(l2cache);
 	OMAP3_SHOW_FEATURE(iva);
@@ -252,8 +256,10 @@ static void __init omap3_cpuinfo(void)
 	OMAP3_SHOW_FEATURE(neon);
 	OMAP3_SHOW_FEATURE(isp);
 	OMAP3_SHOW_FEATURE(192mhz_clk);
-
-	printk(")\n");
+	if (*(buf + n - 1) == ' ')
+		n--;
+	n += scnprintf(buf + n, sizeof(buf) - n, ")\n");
+	pr_info("%s", buf);
 }
 
 #define OMAP3_CHECK_FEATURE(status,feat)				\
diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c
index 62680aa..718981b 100644
--- a/arch/arm/mach-omap2/prm3xxx.c
+++ b/arch/arm/mach-omap2/prm3xxx.c
@@ -319,6 +319,9 @@ void __init omap3_prm_init_pm(bool has_uart4, bool has_iva)
 	if (has_uart4) {
 		en_uart4_mask = OMAP3630_EN_UART4_MASK;
 		grpsel_uart4_mask = OMAP3630_GRPSEL_UART4_MASK;
+	} else {
+		en_uart4_mask = 0;
+		grpsel_uart4_mask = 0;
 	}
 
 	/* Enable wakeups in PER */
diff --git a/arch/arm/mach-omap2/voltage.c b/arch/arm/mach-omap2/voltage.c
index cba8cad..cd15dbd 100644
--- a/arch/arm/mach-omap2/voltage.c
+++ b/arch/arm/mach-omap2/voltage.c
@@ -87,6 +87,12 @@ int voltdm_scale(struct voltagedomain *voltdm,
 		return -ENODATA;
 	}
 
+	if (!voltdm->volt_data) {
+		pr_err("%s: No voltage data defined for vdd_%s\n",
+			__func__, voltdm->name);
+		return -ENODATA;
+	}
+
 	/* Adjust voltage to the exact voltage from the OPP table */
 	for (i = 0; voltdm->volt_data[i].volt_nominal != 0; i++) {
 		if (voltdm->volt_data[i].volt_nominal >= target_volt) {
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 7eafdef..4da3175 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1168,7 +1168,7 @@ static int __init dma_debug_do_init(void)
 	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
 	return 0;
 }
-fs_initcall(dma_debug_do_init);
+core_initcall(dma_debug_do_init);
 
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
 
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index f6d333f..8dea616 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -96,7 +96,7 @@
 	ret	lr
 ENDPROC(cpu_cm7_proc_fin)
 
-	.section ".text.init", #alloc, #execinstr
+	.section ".init.text", #alloc, #execinstr
 
 __v7m_cm7_setup:
 	mov	r8, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC| V7M_SCB_CCR_BP)
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index c476253..e9bd587 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -105,7 +105,7 @@
 				status = "disabled";
 			};
 
-			nb_perih_clk: nb-periph-clk@13000{
+			nb_periph_clk: nb-periph-clk@13000 {
 				compatible = "marvell,armada-3700-periph-clock-nb";
 				reg = <0x13000 0x100>;
 				clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
@@ -113,7 +113,7 @@
 				#clock-cells = <1>;
 			};
 
-			sb_perih_clk: sb-periph-clk@18000{
+			sb_periph_clk: sb-periph-clk@18000 {
 				compatible = "marvell,armada-3700-periph-clock-sb";
 				reg = <0x18000 0x100>;
 				clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
index 842fb33..6bf9e24 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
@@ -130,8 +130,8 @@
 				reg = <0x700600 0x50>;
 				#address-cells = <0x1>;
 				#size-cells = <0x0>;
-				cell-index = <1>;
-				clocks = <&cps_syscon0 0 3>;
+				cell-index = <3>;
+				clocks = <&cps_syscon0 1 21>;
 				status = "disabled";
 			};
 
@@ -140,7 +140,7 @@
 				reg = <0x700680 0x50>;
 				#address-cells = <1>;
 				#size-cells = <0>;
-				cell-index = <2>;
+				cell-index = <4>;
 				clocks = <&cps_syscon0 1 21>;
 				status = "disabled";
 			};
diff --git a/arch/arm64/boot/dts/qcom/msmskunk.dtsi b/arch/arm64/boot/dts/qcom/msmskunk.dtsi
index 310d254..4b1a2c0 100644
--- a/arch/arm64/boot/dts/qcom/msmskunk.dtsi
+++ b/arch/arm64/boot/dts/qcom/msmskunk.dtsi
@@ -577,6 +577,8 @@
 
 		qcom,llcc-erp {
 			compatible = "qcom,llcc-erp";
+			interrupt-names = "ecc_irq";
+			interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		qcom,llcc-amon {
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 2065f46..38b6a2b 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -46,7 +46,15 @@
 #define	ARMV8_PMU_EVTYPE_MASK	0xc800ffff	/* Mask for writable bits */
 #define	ARMV8_PMU_EVTYPE_EVENT	0xffff		/* Mask for EVENT bits */
 
-#define ARMV8_PMU_EVTYPE_EVENT_SW_INCR	0	/* Software increment event */
+/*
+ * PMUv3 event types: required events
+ */
+#define ARMV8_PMUV3_PERFCTR_SW_INCR				0x00
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL			0x03
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE				0x04
+#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED				0x10
+#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES				0x11
+#define ARMV8_PMUV3_PERFCTR_BR_PRED				0x12
 
 /*
  * Event filters for PMUv3
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index cb05eb2..6d47969 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -31,17 +31,9 @@
 
 /*
  * ARMv8 PMUv3 Performance Events handling code.
- * Common event types.
+ * Common event types (some are defined in asm/perf_event.h).
  */
 
-/* Required events. */
-#define ARMV8_PMUV3_PERFCTR_SW_INCR				0x00
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL			0x03
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE				0x04
-#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED				0x10
-#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES				0x11
-#define ARMV8_PMUV3_PERFCTR_BR_PRED				0x12
-
 /* At least one of the following is required. */
 #define ARMV8_PMUV3_PERFCTR_INST_RETIRED			0x08
 #define ARMV8_PMUV3_PERFCTR_INST_SPEC				0x1B
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index f302fdb..87e7e66 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -597,8 +597,14 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
 
 			idx = ARMV8_PMU_CYCLE_IDX;
 		} else {
-			BUG();
+			return false;
 		}
+	} else if (r->CRn == 0 && r->CRm == 9) {
+		/* PMCCNTR */
+		if (pmu_access_event_counter_el0_disabled(vcpu))
+			return false;
+
+		idx = ARMV8_PMU_CYCLE_IDX;
 	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
 		/* PMEVCNTRn_EL0 */
 		if (pmu_access_event_counter_el0_disabled(vcpu))
@@ -606,7 +612,7 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
 
 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
 	} else {
-		BUG();
+		return false;
 	}
 
 	if (!pmu_counter_idx_valid(vcpu, idx))
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 84d49b1..9a3eee6 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -91,7 +91,7 @@
  */
 #define LOAD_HANDLER(reg, label)					\
 	ld	reg,PACAKBASE(r13);	/* get high part of &label */	\
-	ori	reg,reg,(FIXED_SYMBOL_ABS_ADDR(label))@l;
+	ori	reg,reg,FIXED_SYMBOL_ABS_ADDR(label);
 
 #define __LOAD_HANDLER(reg, label)					\
 	ld	reg,PACAKBASE(r13);					\
@@ -158,14 +158,17 @@ BEGIN_FTR_SECTION_NESTED(943)						\
 	std	ra,offset(r13);						\
 END_FTR_SECTION_NESTED(ftr,ftr,943)
 
-#define EXCEPTION_PROLOG_0(area)					\
-	GET_PACA(r13);							\
+#define EXCEPTION_PROLOG_0_PACA(area)					\
 	std	r9,area+EX_R9(r13);	/* save r9 */			\
 	OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR);			\
 	HMT_MEDIUM;							\
 	std	r10,area+EX_R10(r13);	/* save r10 - r12 */		\
 	OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)
 
+#define EXCEPTION_PROLOG_0(area)					\
+	GET_PACA(r13);							\
+	EXCEPTION_PROLOG_0_PACA(area)
+
 #define __EXCEPTION_PROLOG_1(area, extra, vec)				\
 	OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR);		\
 	OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR);		\
@@ -196,6 +199,12 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 	EXCEPTION_PROLOG_1(area, extra, vec);				\
 	EXCEPTION_PROLOG_PSERIES_1(label, h);
 
+/* Have the PACA in r13 already */
+#define EXCEPTION_PROLOG_PSERIES_PACA(area, label, h, extra, vec)	\
+	EXCEPTION_PROLOG_0_PACA(area);					\
+	EXCEPTION_PROLOG_1(area, extra, vec);				\
+	EXCEPTION_PROLOG_PSERIES_1(label, h);
+
 #define __KVMTEST(h, n)							\
 	lbz	r10,HSTATE_IN_GUEST(r13);				\
 	cmpwi	r10,0;							\
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 0132831..c56ea8c 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -460,5 +460,6 @@
 
 #define PPC_SLBIA(IH)	stringify_in_c(.long PPC_INST_SLBIA | \
 				       ((IH & 0x7) << 21))
+#define PPC_INVALIDATE_ERAT	PPC_SLBIA(7)
 
 #endif /* _ASM_POWERPC_PPC_OPCODE_H */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 08ba447..1ba82ea 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -116,7 +116,9 @@
 
 EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
 	SET_SCRATCH0(r13)
-	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
+	GET_PACA(r13)
+	clrrdi	r13,r13,1 /* Last bit of HSPRG0 is set if waking from winkle */
+	EXCEPTION_PROLOG_PSERIES_PACA(PACA_EXGEN, system_reset_common, EXC_STD,
 				 IDLETEST, 0x100)
 
 EXC_REAL_END(system_reset, 0x100, 0x200)
@@ -124,6 +126,9 @@
 
 #ifdef CONFIG_PPC_P7_NAP
 EXC_COMMON_BEGIN(system_reset_idle_common)
+BEGIN_FTR_SECTION
+	GET_PACA(r13) /* Restore HSPRG0 to get the winkle bit in r13 */
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 	bl	pnv_restore_hyp_resource
 
 	li	r0,PNV_THREAD_RUNNING
@@ -169,7 +174,7 @@
 	SET_SCRATCH0(r13)		/* save r13 */
 	/*
 	 * Running native on arch 2.06 or later, we may wakeup from winkle
-	 * inside machine check. If yes, then last bit of HSPGR0 would be set
+	 * inside machine check. If yes, then last bit of HSPRG0 would be set
 	 * to 1. Hence clear it unconditionally.
 	 */
 	GET_PACA(r13)
@@ -388,7 +393,7 @@
 	/*
 	 * Go back to winkle. Please note that this thread was woken up in
 	 * machine check from winkle and have not restored the per-subcore
-	 * state. Hence before going back to winkle, set last bit of HSPGR0
+	 * state. Hence before going back to winkle, set last bit of HSPRG0
 	 * to 1. This will make sure that if this thread gets woken up
 	 * again at reset vector 0x100 then it will get chance to restore
 	 * the subcore state.
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ce6dc61..49a680d 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1215,7 +1215,7 @@ static void show_instructions(struct pt_regs *regs)
 		int instr;
 
 		if (!(i % 8))
-			printk("\n");
+			pr_cont("\n");
 
 #if !defined(CONFIG_BOOKE)
 		/* If executing with the IMMU off, adjust pc rather
@@ -1227,18 +1227,18 @@ static void show_instructions(struct pt_regs *regs)
 
 		if (!__kernel_text_address(pc) ||
 		     probe_kernel_address((unsigned int __user *)pc, instr)) {
-			printk(KERN_CONT "XXXXXXXX ");
+			pr_cont("XXXXXXXX ");
 		} else {
 			if (regs->nip == pc)
-				printk(KERN_CONT "<%08x> ", instr);
+				pr_cont("<%08x> ", instr);
 			else
-				printk(KERN_CONT "%08x ", instr);
+				pr_cont("%08x ", instr);
 		}
 
 		pc += sizeof(int);
 	}
 
-	printk("\n");
+	pr_cont("\n");
 }
 
 struct regbit {
@@ -1282,7 +1282,7 @@ static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
 
 	for (; bits->bit; ++bits)
 		if (val & bits->bit) {
-			printk("%s%s", s, bits->name);
+			pr_cont("%s%s", s, bits->name);
 			s = sep;
 		}
 }
@@ -1305,9 +1305,9 @@ static void print_tm_bits(unsigned long val)
  *   T: Transactional	(bit 34)
  */
 	if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
-		printk(",TM[");
+		pr_cont(",TM[");
 		print_bits(val, msr_tm_bits, "");
-		printk("]");
+		pr_cont("]");
 	}
 }
 #else
@@ -1316,10 +1316,10 @@ static void print_tm_bits(unsigned long val) {}
 
 static void print_msr_bits(unsigned long val)
 {
-	printk("<");
+	pr_cont("<");
 	print_bits(val, msr_bits, ",");
 	print_tm_bits(val);
-	printk(">");
+	pr_cont(">");
 }
 
 #ifdef CONFIG_PPC64
@@ -1347,29 +1347,29 @@ void show_regs(struct pt_regs * regs)
 	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
 	trap = TRAP(regs);
 	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
-		printk("CFAR: "REG" ", regs->orig_gpr3);
+		pr_cont("CFAR: "REG" ", regs->orig_gpr3);
 	if (trap == 0x200 || trap == 0x300 || trap == 0x600)
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
-		printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
+		pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
 #else
-		printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
+		pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
 #endif
 #ifdef CONFIG_PPC64
-	printk("SOFTE: %ld ", regs->softe);
+	pr_cont("SOFTE: %ld ", regs->softe);
 #endif
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	if (MSR_TM_ACTIVE(regs->msr))
-		printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
+		pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
 #endif
 
 	for (i = 0;  i < 32;  i++) {
 		if ((i % REGS_PER_LINE) == 0)
-			printk("\nGPR%02d: ", i);
-		printk(REG " ", regs->gpr[i]);
+			pr_cont("\nGPR%02d: ", i);
+		pr_cont(REG " ", regs->gpr[i]);
 		if (i == LAST_VOLATILE && !FULL_REGS(regs))
 			break;
 	}
-	printk("\n");
+	pr_cont("\n");
 #ifdef CONFIG_KALLSYMS
 	/*
 	 * Lookup NIP late so we have the best change of getting the
@@ -1900,14 +1900,14 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
 			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 			if ((ip == rth) && curr_frame >= 0) {
-				printk(" (%pS)",
+				pr_cont(" (%pS)",
 				       (void *)current->ret_stack[curr_frame].ret);
 				curr_frame--;
 			}
 #endif
 			if (firstframe)
-				printk(" (unreliable)");
-			printk("\n");
+				pr_cont(" (unreliable)");
+			pr_cont("\n");
 		}
 		firstframe = 0;
 
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 7ac8e6e..8d586cf 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -226,17 +226,25 @@ static void __init configure_exceptions(void)
 		if (firmware_has_feature(FW_FEATURE_OPAL))
 			opal_configure_cores();
 
-		/* Enable AIL if supported, and we are in hypervisor mode */
-		if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
-		    early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
-			unsigned long lpcr = mfspr(SPRN_LPCR);
-			mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
-		}
+		/* AIL on native is done in cpu_ready_for_interrupts() */
 	}
 }
 
 static void cpu_ready_for_interrupts(void)
 {
+	/*
+	 * Enable AIL if supported, and we are in hypervisor mode. This
+	 * is called once for every processor.
+	 *
+	 * If we are not in hypervisor mode the job is done once for
+	 * the whole partition in configure_exceptions().
+	 */
+	if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
+	    early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		unsigned long lpcr = mfspr(SPRN_LPCR);
+		mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
+	}
+
 	/* Set IR and DR in PACA MSR */
 	get_paca()->kernel_msr = MSR_KERNEL;
 }
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 44d3c3a..5503078 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1029,6 +1029,10 @@ void hash__early_init_mmu_secondary(void)
 {
 	/* Initialize hash table for that CPU */
 	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+
+		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+			update_hid_for_hash();
+
 		if (!cpu_has_feature(CPU_FTR_ARCH_300))
 			mtspr(SPRN_SDR1, _SDR1);
 		else
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index ed7bddc..688b545 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -388,6 +388,10 @@ void radix__early_init_mmu_secondary(void)
 	 * update partition table control register and UPRT
 	 */
 	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+
+		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+			update_hid_for_radix();
+
 		lpcr = mfspr(SPRN_LPCR);
 		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
 
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index bda8c43..3493cf4 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -50,6 +50,8 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
 	for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
 		__tlbiel_pid(pid, set, ric);
 	}
+	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
 	return;
 }
 
@@ -83,6 +85,8 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
 	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
 	asm volatile("ptesync": : :"memory");
+	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
 }
 
 static inline void _tlbie_va(unsigned long va, unsigned long pid,
diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
index 6160761..4810e48 100644
--- a/arch/tile/include/asm/cache.h
+++ b/arch/tile/include/asm/cache.h
@@ -61,4 +61,7 @@
  */
 #define __write_once __read_mostly
 
+/* __ro_after_init is the generic name for the tile arch __write_once. */
+#define __ro_after_init __read_mostly
+
 #endif /* _ASM_TILE_CACHE_H */
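
For illustration, a minimal sketch of how the __ro_after_init annotation aliased
above is typically used (the variable and initcall below are illustrative, not
part of this patch); on tile it simply maps to __read_mostly, so the write
protection is advisory there:

	#include <linux/cache.h>
	#include <linux/init.h>

	/* Writable during boot, intended to be read-only once init completes. */
	static unsigned long boot_policy __ro_after_init;

	static int __init boot_policy_setup(void)
	{
		boot_policy = 1;	/* last legitimate write, at init time */
		return 0;
	}
	early_initcall(boot_policy_setup);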
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index 5f845ee..81195cc 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -8,8 +8,12 @@
 #define PCI_DEVICE_ID_INTEL_HSW_IMC	0x0c00
 #define PCI_DEVICE_ID_INTEL_HSW_U_IMC	0x0a04
 #define PCI_DEVICE_ID_INTEL_BDW_IMC	0x1604
-#define PCI_DEVICE_ID_INTEL_SKL_IMC	0x191f
-#define PCI_DEVICE_ID_INTEL_SKL_U_IMC	0x190c
+#define PCI_DEVICE_ID_INTEL_SKL_U_IMC	0x1904
+#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC	0x190c
+#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC	0x1900
+#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC	0x1910
+#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC	0x190f
+#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC	0x191f
 
 /* SNB event control */
 #define SNB_UNC_CTL_EV_SEL_MASK			0x000000ff
@@ -616,13 +620,29 @@ static const struct pci_device_id bdw_uncore_pci_ids[] = {
 
 static const struct pci_device_id skl_uncore_pci_ids[] = {
 	{ /* IMC */
-		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC),
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC),
 		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
 	},
 	{ /* IMC */
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
 		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
 	},
+	{ /* IMC */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC),
+		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+	},
+	{ /* IMC */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC),
+		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+	},
+	{ /* IMC */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC),
+		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+	},
+	{ /* IMC */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
+		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+	},
 
 	{ /* end: all zeroes */ },
 };
@@ -666,8 +686,12 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
 	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
 	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
 	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
-	IMC_DEV(SKL_IMC, &skl_uncore_pci_driver),    /* 6th Gen Core */
+	IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core Y */
 	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
+	IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Dual Core */
+	IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Quad Core */
+	IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Dual Core */
+	IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Quad Core */
 	{  /* end marker */ }
 };
 
diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h
index 5b6753d..49da9f4 100644
--- a/arch/x86/include/asm/intel-mid.h
+++ b/arch/x86/include/asm/intel-mid.h
@@ -17,6 +17,7 @@
 
 extern int intel_mid_pci_init(void);
 extern int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state);
+extern pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev);
 
 extern void intel_mid_pwr_power_off(void);
 
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index b81fe2d..1e81a37 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -347,7 +347,6 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
 #ifdef CONFIG_SMP
 	unsigned bits;
 	int cpu = smp_processor_id();
-	unsigned int socket_id, core_complex_id;
 
 	bits = c->x86_coreid_bits;
 	/* Low order bits define the core id (index of core in socket) */
@@ -365,10 +364,7 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
 	 if (c->x86 != 0x17 || !cpuid_edx(0x80000006))
 		return;
 
-	socket_id	= (c->apicid >> bits) - 1;
-	core_complex_id	= (c->apicid & ((1 << bits) - 1)) >> 3;
-
-	per_cpu(cpu_llc_id, cpu) = (socket_id << 3) | core_complex_id;
+	per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
 #endif
 }
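
The simplification derives the last-level-cache id straight from the APIC id by dropping the low three bits: on Fam17h each core complex spans eight APIC ids, so apicid >> 3 is already unique per complex system-wide. A quick user-space check (assumption: 8 APIC ids per core complex, per the shift above):

	#include <stdio.h>

	static unsigned int llc_id(unsigned int apicid)
	{
		return apicid >> 3;	/* 8 APIC ids per core complex */
	}

	int main(void)
	{
		/* APIC ids 0-7 share complex 0; 8-15 share complex 1. */
		printf("%u %u %u\n", llc_id(0x0), llc_id(0x7), llc_id(0x8));
		return 0;	/* prints: 0 0 1 */
	}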
 
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 9bd910a..cc9e980 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -979,6 +979,35 @@ static void x86_init_cache_qos(struct cpuinfo_x86 *c)
 }
 
 /*
+ * The physical to logical package id mapping is initialized from the
+ * acpi/mptables information. Make sure that CPUID actually agrees with
+ * that.
+ */
+static void sanitize_package_id(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+	unsigned int pkg, apicid, cpu = smp_processor_id();
+
+	apicid = apic->cpu_present_to_apicid(cpu);
+	pkg = apicid >> boot_cpu_data.x86_coreid_bits;
+
+	if (apicid != c->initial_apicid) {
+		pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x CPUID: %x\n",
+		       cpu, apicid, c->initial_apicid);
+		c->initial_apicid = apicid;
+	}
+	if (pkg != c->phys_proc_id) {
+		pr_err(FW_BUG "CPU%u: Using firmware package id %u instead of %u\n",
+		       cpu, pkg, c->phys_proc_id);
+		c->phys_proc_id = pkg;
+	}
+	c->logical_proc_id = topology_phys_to_logical_pkg(pkg);
+#else
+	c->logical_proc_id = 0;
+#endif
+}
+
+/*
  * This does the hard work of actually picking apart the CPU stuff...
  */
 static void identify_cpu(struct cpuinfo_x86 *c)
@@ -1103,8 +1132,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 #ifdef CONFIG_NUMA
 	numa_add_cpu(smp_processor_id());
 #endif
-	/* The boot/hotplug time assigment got cleared, restore it */
-	c->logical_proc_id = topology_phys_to_logical_pkg(c->phys_proc_id);
+	sanitize_package_id(c);
 }
 
 /*
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index 25810b1..4da0303 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -156,6 +156,16 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 }
 
 
+static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
+		    struct kvm *kvm, int irq_source_id, int level,
+		    bool line_status)
+{
+	if (!level)
+		return -1;
+
+	return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
+}
+
 int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
 			      struct kvm *kvm, int irq_source_id, int level,
 			      bool line_status)
@@ -163,18 +173,26 @@ int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
 	struct kvm_lapic_irq irq;
 	int r;
 
-	if (unlikely(e->type != KVM_IRQ_ROUTING_MSI))
-		return -EWOULDBLOCK;
+	switch (e->type) {
+	case KVM_IRQ_ROUTING_HV_SINT:
+		return kvm_hv_set_sint(e, kvm, irq_source_id, level,
+				       line_status);
 
-	if (kvm_msi_route_invalid(kvm, e))
-		return -EINVAL;
+	case KVM_IRQ_ROUTING_MSI:
+		if (kvm_msi_route_invalid(kvm, e))
+			return -EINVAL;
 
-	kvm_set_msi_irq(kvm, e, &irq);
+		kvm_set_msi_irq(kvm, e, &irq);
 
-	if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
-		return r;
-	else
-		return -EWOULDBLOCK;
+		if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
+			return r;
+		break;
+
+	default:
+		break;
+	}
+
+	return -EWOULDBLOCK;
 }
 
 int kvm_request_irq_source_id(struct kvm *kvm)
@@ -254,16 +272,6 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
 	srcu_read_unlock(&kvm->irq_srcu, idx);
 }
 
-static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
-		    struct kvm *kvm, int irq_source_id, int level,
-		    bool line_status)
-{
-	if (!level)
-		return -1;
-
-	return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
-}
-
 int kvm_set_routing_entry(struct kvm *kvm,
 			  struct kvm_kernel_irq_routing_entry *e,
 			  const struct kvm_irq_routing_entry *ue)
@@ -423,18 +431,6 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
 	srcu_read_unlock(&kvm->irq_srcu, idx);
 }
 
-int kvm_arch_set_irq(struct kvm_kernel_irq_routing_entry *irq, struct kvm *kvm,
-		     int irq_source_id, int level, bool line_status)
-{
-	switch (irq->type) {
-	case KVM_IRQ_ROUTING_HV_SINT:
-		return kvm_hv_set_sint(irq, kvm, irq_source_id, level,
-				       line_status);
-	default:
-		return -EWOULDBLOCK;
-	}
-}
-
 void kvm_arch_irq_routing_update(struct kvm *kvm)
 {
 	kvm_hv_irq_routing_update(kvm);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3017de0..04c5d96 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -210,7 +210,18 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
 	struct kvm_shared_msrs *locals
 		= container_of(urn, struct kvm_shared_msrs, urn);
 	struct kvm_shared_msr_values *values;
+	unsigned long flags;
 
+	/*
+	 * Disabling irqs at this point since the following code could be
+	 * interrupted and executed through kvm_arch_hardware_disable()
+	 */
+	local_irq_save(flags);
+	if (locals->registered) {
+		locals->registered = false;
+		user_return_notifier_unregister(urn);
+	}
+	local_irq_restore(flags);
 	for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
 		values = &locals->values[slot];
 		if (values->host != values->curr) {
@@ -218,8 +229,6 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
 			values->curr = values->host;
 		}
 	}
-	locals->registered = false;
-	user_return_notifier_unregister(urn);
 }
 
 static void shared_msr_update(unsigned slot, u32 msr)
@@ -1724,18 +1733,23 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
 
 static u64 __get_kvmclock_ns(struct kvm *kvm)
 {
-	struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, 0);
 	struct kvm_arch *ka = &kvm->arch;
-	s64 ns;
+	struct pvclock_vcpu_time_info hv_clock;
 
-	if (vcpu->arch.hv_clock.flags & PVCLOCK_TSC_STABLE_BIT) {
-		u64 tsc = kvm_read_l1_tsc(vcpu, rdtsc());
-		ns = __pvclock_read_cycles(&vcpu->arch.hv_clock, tsc);
-	} else {
-		ns = ktime_get_boot_ns() + ka->kvmclock_offset;
+	spin_lock(&ka->pvclock_gtod_sync_lock);
+	if (!ka->use_master_clock) {
+		spin_unlock(&ka->pvclock_gtod_sync_lock);
+		return ktime_get_boot_ns() + ka->kvmclock_offset;
 	}
 
-	return ns;
+	hv_clock.tsc_timestamp = ka->master_cycle_now;
+	hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
+	spin_unlock(&ka->pvclock_gtod_sync_lock);
+
+	kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
+			   &hv_clock.tsc_shift,
+			   &hv_clock.tsc_to_system_mul);
+	return __pvclock_read_cycles(&hv_clock, rdtsc());
 }
 
 u64 get_kvmclock_ns(struct kvm *kvm)
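With the masterclock parameters snapshotted under pvclock_gtod_sync_lock, the host-side read reuses the guest's pvclock math. Roughly, as a simplified sketch of what __pvclock_read_cycles() computes (the real code uses a wider multiply to avoid overflow):

	#include <stdint.h>

	struct pv_time_example {
		uint64_t tsc_timestamp;      /* TSC when system_time was sampled */
		uint64_t system_time;        /* ns at tsc_timestamp */
		uint32_t tsc_to_system_mul;  /* 32.32 fixed point, cycles -> ns */
		int8_t   tsc_shift;
	};

	static uint64_t pv_read_ns(const struct pv_time_example *t, uint64_t tsc)
	{
		uint64_t delta = tsc - t->tsc_timestamp;

		if (t->tsc_shift >= 0)
			delta <<= t->tsc_shift;
		else
			delta >>= -t->tsc_shift;
		/* truncated 64-bit multiply; real code keeps all 96 bits */
		return t->system_time + ((delta * t->tsc_to_system_mul) >> 32);
	}
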
@@ -2596,7 +2610,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_PIT_STATE2:
 	case KVM_CAP_SET_IDENTITY_MAP_ADDR:
 	case KVM_CAP_XEN_HVM:
-	case KVM_CAP_ADJUST_CLOCK:
 	case KVM_CAP_VCPU_EVENTS:
 	case KVM_CAP_HYPERV:
 	case KVM_CAP_HYPERV_VAPIC:
@@ -2623,6 +2636,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 #endif
 		r = 1;
 		break;
+	case KVM_CAP_ADJUST_CLOCK:
+		r = KVM_CLOCK_TSC_STABLE;
+		break;
 	case KVM_CAP_X86_SMM:
 		/* SMBASE is usually relocated above 1M on modern chipsets,
 		 * and SMM handlers might indeed rely on 4G segment limits,
@@ -3415,6 +3431,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 	};
 	case KVM_SET_VAPIC_ADDR: {
 		struct kvm_vapic_addr va;
+		int idx;
 
 		r = -EINVAL;
 		if (!lapic_in_kernel(vcpu))
@@ -3422,7 +3439,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		r = -EFAULT;
 		if (copy_from_user(&va, argp, sizeof va))
 			goto out;
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
 		r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		break;
 	}
 	case KVM_X86_SETUP_MCE: {
@@ -4103,9 +4122,11 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		struct kvm_clock_data user_ns;
 		u64 now_ns;
 
-		now_ns = get_kvmclock_ns(kvm);
+		local_irq_disable();
+		now_ns = __get_kvmclock_ns(kvm);
 		user_ns.clock = now_ns;
-		user_ns.flags = 0;
+		user_ns.flags = kvm->arch.use_master_clock ? KVM_CLOCK_TSC_STABLE : 0;
+		local_irq_enable();
 		memset(&user_ns.pad, 0, sizeof(user_ns.pad));
 
 		r = -EFAULT;
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index bf99aa7..936a488 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -861,7 +861,7 @@ static void __init __efi_enter_virtual_mode(void)
 	int count = 0, pg_shift = 0;
 	void *new_memmap = NULL;
 	efi_status_t status;
-	phys_addr_t pa;
+	unsigned long pa;
 
 	efi.systab = NULL;
 
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 58b0f80..319148b 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -31,6 +31,7 @@
 #include <linux/io.h>
 #include <linux/reboot.h>
 #include <linux/slab.h>
+#include <linux/ucs2_string.h>
 
 #include <asm/setup.h>
 #include <asm/page.h>
@@ -211,6 +212,35 @@ void efi_sync_low_kernel_mappings(void)
 	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
 }
 
+/*
+ * Wrapper for slow_virt_to_phys() that handles NULL addresses.
+ */
+static inline phys_addr_t
+virt_to_phys_or_null_size(void *va, unsigned long size)
+{
+	bool bad_size;
+
+	if (!va)
+		return 0;
+
+	if (virt_addr_valid(va))
+		return virt_to_phys(va);
+
+	/*
+	 * A fully aligned variable on the stack is guaranteed not to
+	 * cross a page boundary. Try to catch strings on the stack by
+	 * checking that 'size' is a power of two.
+	 */
+	bad_size = size > PAGE_SIZE || !is_power_of_2(size);
+
+	WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size);
+
+	return slow_virt_to_phys(va);
+}
+
+#define virt_to_phys_or_null(addr)				\
+	virt_to_phys_or_null_size((addr), sizeof(*(addr)))
+
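The sizeof(*(addr)) in the wrapper is what makes the WARN_ON meaningful: a naturally aligned object of power-of-two size can never straddle a page, since its offset within the page is a multiple of its size. A quick user-space illustration of the invariant (4K page size assumed):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	#define EXAMPLE_PAGE_SIZE 4096u

	static bool crosses_page(uintptr_t va, size_t size)
	{
		return (va % EXAMPLE_PAGE_SIZE) + size > EXAMPLE_PAGE_SIZE;
	}

	/* If va % size == 0 and size is a power of two <= EXAMPLE_PAGE_SIZE,
	 * then crosses_page(va, size) is always false. */
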
 int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 {
 	unsigned long pfn, text;
@@ -494,8 +524,8 @@ static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
 
 	spin_lock(&rtc_lock);
 
-	phys_tm = virt_to_phys(tm);
-	phys_tc = virt_to_phys(tc);
+	phys_tm = virt_to_phys_or_null(tm);
+	phys_tc = virt_to_phys_or_null(tc);
 
 	status = efi_thunk(get_time, phys_tm, phys_tc);
 
@@ -511,7 +541,7 @@ static efi_status_t efi_thunk_set_time(efi_time_t *tm)
 
 	spin_lock(&rtc_lock);
 
-	phys_tm = virt_to_phys(tm);
+	phys_tm = virt_to_phys_or_null(tm);
 
 	status = efi_thunk(set_time, phys_tm);
 
@@ -529,9 +559,9 @@ efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
 
 	spin_lock(&rtc_lock);
 
-	phys_enabled = virt_to_phys(enabled);
-	phys_pending = virt_to_phys(pending);
-	phys_tm = virt_to_phys(tm);
+	phys_enabled = virt_to_phys_or_null(enabled);
+	phys_pending = virt_to_phys_or_null(pending);
+	phys_tm = virt_to_phys_or_null(tm);
 
 	status = efi_thunk(get_wakeup_time, phys_enabled,
 			     phys_pending, phys_tm);
@@ -549,7 +579,7 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
 
 	spin_lock(&rtc_lock);
 
-	phys_tm = virt_to_phys(tm);
+	phys_tm = virt_to_phys_or_null(tm);
 
 	status = efi_thunk(set_wakeup_time, enabled, phys_tm);
 
@@ -558,6 +588,10 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
 	return status;
 }
 
+static unsigned long efi_name_size(efi_char16_t *name)
+{
+	return ucs2_strsize(name, EFI_VAR_NAME_LEN) + 1;
+}
 
 static efi_status_t
 efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
@@ -567,11 +601,11 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
 	u32 phys_name, phys_vendor, phys_attr;
 	u32 phys_data_size, phys_data;
 
-	phys_data_size = virt_to_phys(data_size);
-	phys_vendor = virt_to_phys(vendor);
-	phys_name = virt_to_phys(name);
-	phys_attr = virt_to_phys(attr);
-	phys_data = virt_to_phys(data);
+	phys_data_size = virt_to_phys_or_null(data_size);
+	phys_vendor = virt_to_phys_or_null(vendor);
+	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+	phys_attr = virt_to_phys_or_null(attr);
+	phys_data = virt_to_phys_or_null_size(data, *data_size);
 
 	status = efi_thunk(get_variable, phys_name, phys_vendor,
 			   phys_attr, phys_data_size, phys_data);
@@ -586,9 +620,9 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
 	u32 phys_name, phys_vendor, phys_data;
 	efi_status_t status;
 
-	phys_name = virt_to_phys(name);
-	phys_vendor = virt_to_phys(vendor);
-	phys_data = virt_to_phys(data);
+	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+	phys_vendor = virt_to_phys_or_null(vendor);
+	phys_data = virt_to_phys_or_null_size(data, data_size);
 
 	/* If data_size is > sizeof(u32) we've got problems */
 	status = efi_thunk(set_variable, phys_name, phys_vendor,
@@ -605,9 +639,9 @@ efi_thunk_get_next_variable(unsigned long *name_size,
 	efi_status_t status;
 	u32 phys_name_size, phys_name, phys_vendor;
 
-	phys_name_size = virt_to_phys(name_size);
-	phys_vendor = virt_to_phys(vendor);
-	phys_name = virt_to_phys(name);
+	phys_name_size = virt_to_phys_or_null(name_size);
+	phys_vendor = virt_to_phys_or_null(vendor);
+	phys_name = virt_to_phys_or_null_size(name, *name_size);
 
 	status = efi_thunk(get_next_variable, phys_name_size,
 			   phys_name, phys_vendor);
@@ -621,7 +655,7 @@ efi_thunk_get_next_high_mono_count(u32 *count)
 	efi_status_t status;
 	u32 phys_count;
 
-	phys_count = virt_to_phys(count);
+	phys_count = virt_to_phys_or_null(count);
 	status = efi_thunk(get_next_high_mono_count, phys_count);
 
 	return status;
@@ -633,7 +667,7 @@ efi_thunk_reset_system(int reset_type, efi_status_t status,
 {
 	u32 phys_data;
 
-	phys_data = virt_to_phys(data);
+	phys_data = virt_to_phys_or_null_size(data, data_size);
 
 	efi_thunk(reset_system, reset_type, status, data_size, phys_data);
 }
@@ -661,9 +695,9 @@ efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
 	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
 		return EFI_UNSUPPORTED;
 
-	phys_storage = virt_to_phys(storage_space);
-	phys_remaining = virt_to_phys(remaining_space);
-	phys_max = virt_to_phys(max_variable_size);
+	phys_storage = virt_to_phys_or_null(storage_space);
+	phys_remaining = virt_to_phys_or_null(remaining_space);
+	phys_max = virt_to_phys_or_null(max_variable_size);
 
 	status = efi_thunk(query_variable_info, attr, phys_storage,
 			   phys_remaining, phys_max);
diff --git a/arch/x86/platform/intel-mid/pwr.c b/arch/x86/platform/intel-mid/pwr.c
index 5d3b45a..67375dd 100644
--- a/arch/x86/platform/intel-mid/pwr.c
+++ b/arch/x86/platform/intel-mid/pwr.c
@@ -272,6 +272,25 @@ int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
 }
 EXPORT_SYMBOL_GPL(intel_mid_pci_set_power_state);
 
+pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev)
+{
+	struct mid_pwr *pwr = midpwr;
+	int id, reg, bit;
+	u32 power;
+
+	if (!pwr || !pwr->available)
+		return PCI_UNKNOWN;
+
+	id = intel_mid_pwr_get_lss_id(pdev);
+	if (id < 0)
+		return PCI_UNKNOWN;
+
+	reg = (id * LSS_PWS_BITS) / 32;
+	bit = (id * LSS_PWS_BITS) % 32;
+	power = mid_pwr_get_state(pwr, reg);
+	return (__force pci_power_t)((power >> bit) & 3);
+}
+
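Each logical subsystem (LSS) occupies LSS_PWS_BITS bits of a 32-bit power state register, which is what the reg/bit arithmetic unpacks. A worked example, assuming two state bits per LSS as the final '& 3' mask implies:

	#include <stdio.h>

	#define EXAMPLE_LSS_PWS_BITS 2	/* assumption: matches the '& 3' mask */

	int main(void)
	{
		int id = 20;

		printf("reg %d, bit %d\n",
		       (id * EXAMPLE_LSS_PWS_BITS) / 32,   /* 1 */
		       (id * EXAMPLE_LSS_PWS_BITS) % 32);  /* 8 */
		return 0;	/* LSS 20 lives in bits [9:8] of register 1 */
	}
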
 void intel_mid_pwr_power_off(void)
 {
 	struct mid_pwr *pwr = midpwr;
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index ac58c16..555b9fa 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -16,6 +16,7 @@
 
 KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -MD -Os -mcmodel=large
 KBUILD_CFLAGS += -m$(BITS)
+KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
 
 $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
 		$(call if_changed,ld)
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
index de9b14b..cd400af 100644
--- a/arch/xtensa/include/uapi/asm/unistd.h
+++ b/arch/xtensa/include/uapi/asm/unistd.h
@@ -767,7 +767,14 @@ __SYSCALL(346, sys_preadv2, 6)
 #define __NR_pwritev2				347
 __SYSCALL(347, sys_pwritev2, 6)
 
-#define __NR_syscall_count			348
+#define __NR_pkey_mprotect			348
+__SYSCALL(348, sys_pkey_mprotect, 4)
+#define __NR_pkey_alloc				349
+__SYSCALL(349, sys_pkey_alloc, 2)
+#define __NR_pkey_free				350
+__SYSCALL(350, sys_pkey_free, 1)
+
+#define __NR_syscall_count			351
 
 /*
  * sysxtensa syscall handler
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index 9a5bcd0..be81e69 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -172,10 +172,11 @@ void __init time_init(void)
 {
 	of_clk_init(NULL);
 #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
-	printk("Calibrating CPU frequency ");
+	pr_info("Calibrating CPU frequency ");
 	calibrate_ccount();
-	printk("%d.%02d MHz\n", (int)ccount_freq/1000000,
-			(int)(ccount_freq/10000)%100);
+	pr_cont("%d.%02d MHz\n",
+		(int)ccount_freq / 1000000,
+		(int)(ccount_freq / 10000) % 100);
 #else
 	ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL;
 #endif
@@ -210,9 +211,8 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
 void calibrate_delay(void)
 {
 	loops_per_jiffy = ccount_freq / HZ;
-	printk("Calibrating delay loop (skipped)... "
-	       "%lu.%02lu BogoMIPS preset\n",
-	       loops_per_jiffy/(1000000/HZ),
-	       (loops_per_jiffy/(10000/HZ)) % 100);
+	pr_info("Calibrating delay loop (skipped)... %lu.%02lu BogoMIPS preset\n",
+		loops_per_jiffy / (1000000 / HZ),
+		(loops_per_jiffy / (10000 / HZ)) % 100);
 }
 #endif
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index d02fc30..ce37d5b 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -465,26 +465,25 @@ void show_regs(struct pt_regs * regs)
 
 	for (i = 0; i < 16; i++) {
 		if ((i % 8) == 0)
-			printk(KERN_INFO "a%02d:", i);
-		printk(KERN_CONT " %08lx", regs->areg[i]);
+			pr_info("a%02d:", i);
+		pr_cont(" %08lx", regs->areg[i]);
 	}
-	printk(KERN_CONT "\n");
-
-	printk("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
-	       regs->pc, regs->ps, regs->depc, regs->excvaddr);
-	printk("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n",
-	       regs->lbeg, regs->lend, regs->lcount, regs->sar);
+	pr_cont("\n");
+	pr_info("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
+		regs->pc, regs->ps, regs->depc, regs->excvaddr);
+	pr_info("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n",
+		regs->lbeg, regs->lend, regs->lcount, regs->sar);
 	if (user_mode(regs))
-		printk("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
-		       regs->windowbase, regs->windowstart, regs->wmask,
-		       regs->syscall);
+		pr_cont("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
+			regs->windowbase, regs->windowstart, regs->wmask,
+			regs->syscall);
 }
 
 static int show_trace_cb(struct stackframe *frame, void *data)
 {
 	if (kernel_text_address(frame->pc)) {
-		printk(" [<%08lx>] ", frame->pc);
-		print_symbol("%s\n", frame->pc);
+		pr_cont(" [<%08lx>]", frame->pc);
+		print_symbol(" %s\n", frame->pc);
 	}
 	return 0;
 }
@@ -494,19 +493,13 @@ void show_trace(struct task_struct *task, unsigned long *sp)
 	if (!sp)
 		sp = stack_pointer(task);
 
-	printk("Call Trace:");
-#ifdef CONFIG_KALLSYMS
-	printk("\n");
-#endif
+	pr_info("Call Trace:\n");
 	walk_stackframe(sp, show_trace_cb, NULL);
-	printk("\n");
+#ifndef CONFIG_KALLSYMS
+	pr_cont("\n");
+#endif
 }
 
-/*
- * This routine abuses get_user()/put_user() to reference pointers
- * with at least a bit of error checking ...
- */
-
 static int kstack_depth_to_print = 24;
 
 void show_stack(struct task_struct *task, unsigned long *sp)
@@ -518,52 +511,29 @@ void show_stack(struct task_struct *task, unsigned long *sp)
 		sp = stack_pointer(task);
 	stack = sp;
 
-	printk("\nStack: ");
+	pr_info("Stack:\n");
 
 	for (i = 0; i < kstack_depth_to_print; i++) {
 		if (kstack_end(sp))
 			break;
-		if (i && ((i % 8) == 0))
-			printk("\n       ");
-		printk("%08lx ", *sp++);
+		pr_cont(" %08lx", *sp++);
+		if (i % 8 == 7)
+			pr_cont("\n");
 	}
-	printk("\n");
 	show_trace(task, stack);
 }
 
-void show_code(unsigned int *pc)
-{
-	long i;
-
-	printk("\nCode:");
-
-	for(i = -3 ; i < 6 ; i++) {
-		unsigned long insn;
-		if (__get_user(insn, pc + i)) {
-			printk(" (Bad address in pc)\n");
-			break;
-		}
-		printk("%c%08lx%c",(i?' ':'<'),insn,(i?' ':'>'));
-	}
-}
-
 DEFINE_SPINLOCK(die_lock);
 
 void die(const char * str, struct pt_regs * regs, long err)
 {
 	static int die_counter;
-	int nl = 0;
 
 	console_verbose();
 	spin_lock_irq(&die_lock);
 
-	printk("%s: sig: %ld [#%d]\n", str, err, ++die_counter);
-#ifdef CONFIG_PREEMPT
-	printk("PREEMPT ");
-	nl = 1;
-#endif
-	if (nl)
-		printk("\n");
+	pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter,
+		IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "");
 	show_regs(regs);
 	if (!user_mode(regs))
 		show_stack(NULL, (unsigned long*)regs->areg[1]);
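The IS_ENABLED() form replaces the old #ifdef/nl dance with a plain C expression the compiler folds at build time while still parsing both branches. A minimal sketch of the idiom:

	#include <linux/kconfig.h>
	#include <linux/printk.h>

	static void example_report_preempt(void)
	{
		/* IS_ENABLED() evaluates to a compile-time constant 1 or 0 */
		pr_info("preemption: %s\n",
			IS_ENABLED(CONFIG_PREEMPT) ? "PREEMPT" : "none");
	}
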
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 2d8466f..05e21b4 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -214,23 +214,26 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 
 	ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
 
-	if (ctx->more) {
+	if (!result) {
+		err = af_alg_wait_for_completion(
+				crypto_ahash_init(&ctx->req),
+				&ctx->completion);
+		if (err)
+			goto unlock;
+	}
+
+	if (!result || ctx->more) {
 		ctx->more = 0;
 		err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
 						 &ctx->completion);
 		if (err)
 			goto unlock;
-	} else if (!result) {
-		err = af_alg_wait_for_completion(
-				crypto_ahash_digest(&ctx->req),
-				&ctx->completion);
 	}
 
 	err = memcpy_to_msg(msg, ctx->result, len);
 
-	hash_free_result(sk, ctx);
-
 unlock:
+	hash_free_result(sk, ctx);
 	release_sock(sk);
 
 	return err ?: len;
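The reordered flow runs crypto_ahash_init() followed by crypto_ahash_final() when no partial result exists, so a read with no preceding write now returns the digest of the empty message instead of operating on an uninitialized request, and moving hash_free_result() under the unlock label frees the result buffer on every exit path. A hedged user-space sketch of that corner case (error handling elided):

	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/if_alg.h>

	int main(void)
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "hash",
			.salg_name   = "sha1",
		};
		unsigned char digest[20];
		int tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
		int op;

		bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
		op = accept(tfm, NULL, 0);
		/* No send(): the kernel runs init + final on an empty stream. */
		read(op, digest, sizeof(digest));
		close(op);
		close(tfm);
		return 0;
	}
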
diff --git a/drivers/Kconfig b/drivers/Kconfig
index e1e2066..cc11302 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -112,6 +112,8 @@
 
 source "drivers/rtc/Kconfig"
 
+source "drivers/esoc/Kconfig"
+
 source "drivers/dma/Kconfig"
 
 source "drivers/dma-buf/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 194d20b..cf40194 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -172,4 +172,5 @@
 obj-$(CONFIG_STM)		+= hwtracing/stm/
 obj-$(CONFIG_ANDROID)		+= android/
 obj-$(CONFIG_NVMEM)		+= nvmem/
+obj-$(CONFIG_ESOC)              += esoc/
 obj-$(CONFIG_FPGA)		+= fpga/
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 046c4d0..5fb838e 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -480,19 +480,17 @@ static void acpi_tb_convert_fadt(void)
 	u32 i;
 
 	/*
-	 * For ACPI 1.0 FADTs (revision 1), ensure that reserved fields which
+	 * For ACPI 1.0 FADTs (revision 1 or 2), ensure that reserved fields which
 	 * should be zero are indeed zero. This will workaround BIOSs that
 	 * inadvertently place values in these fields.
 	 *
 	 * The ACPI 1.0 reserved fields that will be zeroed are the bytes located
 	 * at offset 45, 55, 95, and the word located at offset 109, 110.
 	 *
-	 * Note: The FADT revision value is unreliable because of BIOS errors.
-	 * The table length is instead used as the final word on the version.
-	 *
-	 * Note: FADT revision 3 is the ACPI 2.0 version of the FADT.
+	 * Note: The FADT revision value is unreliable. Only the length can be
+	 * trusted.
 	 */
-	if (acpi_gbl_FADT.header.length <= ACPI_FADT_V3_SIZE) {
+	if (acpi_gbl_FADT.header.length <= ACPI_FADT_V2_SIZE) {
 		acpi_gbl_FADT.preferred_profile = 0;
 		acpi_gbl_FADT.pstate_control = 0;
 		acpi_gbl_FADT.cst_control = 0;
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
index b49e613..fc9e889 100644
--- a/drivers/char/ipmi/bt-bmc.c
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -484,7 +484,7 @@ static int bt_bmc_remove(struct platform_device *pdev)
 }
 
 static const struct of_device_id bt_bmc_match[] = {
-	{ .compatible = "aspeed,ast2400-bt-bmc" },
+	{ .compatible = "aspeed,ast2400-ibt-bmc" },
 	{ },
 };
 
@@ -502,4 +502,4 @@ module_platform_driver(bt_bmc_driver);
 MODULE_DEVICE_TABLE(of, bt_bmc_match);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Alistair Popple <alistair@popple.id.au>");
-MODULE_DESCRIPTION("Linux device interface to the BT interface");
+MODULE_DESCRIPTION("Linux device interface to the IPMI BT interface");
diff --git a/drivers/clk/qcom/clk-dummy.c b/drivers/clk/qcom/clk-dummy.c
index 6334b10..e2465c4 100644
--- a/drivers/clk/qcom/clk-dummy.c
+++ b/drivers/clk/qcom/clk-dummy.c
@@ -15,13 +15,8 @@
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
-#include <linux/reset-controller.h>
 
-struct clk_dummy {
-	struct clk_hw hw;
-	struct reset_controller_dev reset;
-	unsigned long rrate;
-};
+#include "common.h"
 
 #define to_clk_dummy(_hw)	container_of(_hw, struct clk_dummy, hw)
 
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index b904c33..acbe793 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013, 2016, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -157,6 +157,7 @@ extern const struct clk_ops clk_dyn_rcg_ops;
  * @parent_map: map from software's parent index to hardware's src_sel field
  * @freq_tbl: frequency table
  * @current_freq: last cached frequency when using branches with shared RCGs
+ * @enable_safe_config: When set, the RCG is parked at CXO when it's disabled
  * @clkr: regmap clock handle
  *
  */
@@ -167,6 +168,7 @@ struct clk_rcg2 {
 	const struct parent_map	*parent_map;
 	const struct freq_tbl	*freq_tbl;
 	unsigned long		current_freq;
+	bool			enable_safe_config;
 	struct clk_regmap	clkr;
 };
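A clock provider opts into the parking behaviour per RCG when declaring it. A hedged sketch (the register offset, tables and names below are placeholders):

	static struct clk_rcg2 example_rcg2 = {
		.cmd_rcgr = 0x1000,		/* placeholder offset */
		.mnd_width = 8,
		.hid_width = 5,
		.parent_map = example_parent_map,
		.freq_tbl = example_freq_tbl,
		.enable_safe_config = true,	/* park at CXO when disabled */
		.clkr.hw.init = &(struct clk_init_data){
			.name = "example_rcg2",
			.parent_names = example_parents,
			.num_parents = 3,
			.ops = &clk_rcg2_ops,
		},
	};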
 
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 9438822..590cf45 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -20,6 +20,7 @@
 #include <linux/delay.h>
 #include <linux/regmap.h>
 #include <linux/math64.h>
+#include <linux/clk.h>
 
 #include <asm/div64.h>
 
@@ -48,6 +49,14 @@
 #define N_REG			0xc
 #define D_REG			0x10
 
+static struct freq_tbl cxo_f = {
+	.freq = 19200000,
+	.src = 0,
+	.pre_div = 1,
+	.m = 0,
+	.n = 0,
+};
+
 static int clk_rcg2_is_enabled(struct clk_hw *hw)
 {
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -125,6 +134,35 @@ static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
 	return update_config(rcg);
 }
 
+static int clk_rcg2_set_force_enable(struct clk_hw *hw)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+	int ret = 0, count = 500;
+
+	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+					CMD_ROOT_EN, CMD_ROOT_EN);
+	if (ret)
+		return ret;
+
+	for (; count > 0; count--) {
+		if (clk_rcg2_is_enabled(hw))
+			return ret;
+		/* Delay for 1usec and retry polling the status bit */
+		udelay(1);
+	}
+
+	WARN(1, "%s: rcg didn't turn on.", clk_hw_get_name(hw));
+	return ret;
+}
+
+static void clk_rcg2_clear_force_enable(struct clk_hw *hw)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+	regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+					CMD_ROOT_EN, 0);
+}
+
 /*
  * Calculate m/n:d rate
  *
@@ -156,6 +194,12 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
 	u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;
 
+	if (rcg->enable_safe_config && !clk_hw_is_prepared(hw)) {
+		if (!rcg->current_freq)
+			rcg->current_freq = cxo_f.freq;
+		return rcg->current_freq;
+	}
+
 	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
 
 	if (rcg->mnd_width) {
@@ -275,6 +319,15 @@ static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate)
 	if (!f)
 		return -EINVAL;
 
+	/*
+	 * Return if the RCG is currently disabled. This configuration update
+	 * will happen as part of the RCG enable sequence.
+	 */
+	if (rcg->enable_safe_config && !clk_hw_is_prepared(hw)) {
+		rcg->current_freq = rate;
+		return 0;
+	}
+
 	return clk_rcg2_configure(rcg, f);
 }
 
@@ -290,8 +343,65 @@ static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
 	return __clk_rcg2_set_rate(hw, rate);
 }
 
+static int clk_rcg2_enable(struct clk_hw *hw)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+	unsigned long rate = clk_get_rate(hw->clk);
+	const struct freq_tbl *f;
+
+	if (!rcg->enable_safe_config)
+		return 0;
+
+	/*
+	 * Switch from CXO to the stashed mux selection. Force enable and
+	 * disable the RCG while configuring it to safeguard against any update
+	 * signal coming from the downstream clock. The current parent has
+	 * already been prepared and enabled at this point, and the CXO source
+	 * is always on while APPS is online. Therefore, the RCG can safely be
+	 * switched.
+	 */
+	f = qcom_find_freq(rcg->freq_tbl, rate);
+	if (!f)
+		return -EINVAL;
+
+	clk_rcg2_set_force_enable(hw);
+	clk_rcg2_configure(rcg, f);
+	clk_rcg2_clear_force_enable(hw);
+
+	return 0;
+}
+
+static void clk_rcg2_disable(struct clk_hw *hw)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+	if (!rcg->enable_safe_config)
+		return;
+	/*
+	 * Park the RCG at a safe configuration - sourced off the CXO. This is
+	 * needed for 2 reasons: In the case of RCGs sourcing PSCBCs, due to a
+	 * default HW behavior, the RCG will turn on when its corresponding
+	 * GDSC is enabled. We might also have cases when the RCG might be left
+	 * enabled without the overlying SW knowing about it. This results from
+	 * hard to track cases of downstream clocks being left enabled. In both
+	 * these cases, scaling the RCG will fail since it's enabled but with
+	 * its sources cut off.
+	 *
+	 * Save mux select and switch to CXO. Force enable and disable the RCG
+	 * while configuring it to safeguard against any update signal coming
+	 * from the downstream clock. The current parent is still prepared and
+	 * enabled at this point, and the CXO source is always on while APPS is
+	 * online. Therefore, the RCG can safely be switched.
+	 */
+	clk_rcg2_set_force_enable(hw);
+	clk_rcg2_configure(rcg, &cxo_f);
+	clk_rcg2_clear_force_enable(hw);
+}
+
 const struct clk_ops clk_rcg2_ops = {
 	.is_enabled = clk_rcg2_is_enabled,
+	.enable = clk_rcg2_enable,
+	.disable = clk_rcg2_disable,
 	.get_parent = clk_rcg2_get_parent,
 	.set_parent = clk_rcg2_set_parent,
 	.recalc_rate = clk_rcg2_recalc_rate,
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index 8879f19..eface18 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -13,6 +13,8 @@
 #ifndef __QCOM_CLK_COMMON_H__
 #define __QCOM_CLK_COMMON_H__
 
+#include <linux/reset-controller.h>
+
 struct platform_device;
 struct regmap_config;
 struct clk_regmap;
@@ -32,6 +34,12 @@ struct qcom_cc_desc {
 	size_t num_gdscs;
 };
 
+struct clk_dummy {
+	struct clk_hw hw;
+	struct reset_controller_dev reset;
+	unsigned long rrate;
+};
+
 extern const struct freq_tbl *qcom_find_freq(const struct freq_tbl *f,
 					     unsigned long rate);
 extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map,
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 156aad1..954a64c 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -137,7 +137,7 @@ static void dbg_dump_sg(const char *level, const char *prefix_str,
 		}
 
 		buf = it_page + it->offset;
-		len = min(tlen, it->length);
+		len = min_t(size_t, tlen, it->length);
 		print_hex_dump(level, prefix_str, prefix_type, rowsize,
 			       groupsize, buf, len, ascii);
 		tlen -= len;
@@ -4583,6 +4583,15 @@ static int __init caam_algapi_init(void)
 		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
 				continue;
 
+		/*
+		 * Check support for AES modes not available
+		 * on LP devices.
+		 */
+		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
+			if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
+			     OP_ALG_AAI_XTS)
+				continue;
+
 		t_alg = caam_alg_alloc(alg);
 		if (IS_ERR(t_alg)) {
 			err = PTR_ERR(t_alg);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index af63a6b..141aefb 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -306,6 +306,7 @@
 	depends on ARCH_MMP || COMPILE_TEST
 	select DMA_ENGINE
 	select MMP_SRAM if ARCH_MMP
+	select GENERIC_ALLOCATOR
 	help
 	  Support the MMP Two-Channel DMA engine.
 	  This engine used for MMP Audio DMA and pxa910 SQU.
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index bac5f02..d5ba43a 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -317,6 +317,12 @@ static irqreturn_t cppi41_irq(int irq, void *data)
 
 		while (val) {
 			u32 desc, len;
+			int error;
+
+			error = pm_runtime_get(cdd->ddev.dev);
+			if (error < 0)
+				dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
+					__func__, error);
 
 			q_num = __fls(val);
 			val &= ~(1 << q_num);
@@ -338,7 +344,6 @@ static irqreturn_t cppi41_irq(int irq, void *data)
 			dma_cookie_complete(&c->txd);
 			dmaengine_desc_get_callback_invoke(&c->txd, NULL);
 
-			/* Paired with cppi41_dma_issue_pending */
 			pm_runtime_mark_last_busy(cdd->ddev.dev);
 			pm_runtime_put_autosuspend(cdd->ddev.dev);
 		}
@@ -362,8 +367,13 @@ static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan)
 	int error;
 
 	error = pm_runtime_get_sync(cdd->ddev.dev);
-	if (error < 0)
+	if (error < 0) {
+		dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
+			__func__, error);
+		pm_runtime_put_noidle(cdd->ddev.dev);
+
 		return error;
+	}
 
 	dma_cookie_init(chan);
 	dma_async_tx_descriptor_init(&c->txd, chan);
@@ -385,8 +395,11 @@ static void cppi41_dma_free_chan_resources(struct dma_chan *chan)
 	int error;
 
 	error = pm_runtime_get_sync(cdd->ddev.dev);
-	if (error < 0)
+	if (error < 0) {
+		pm_runtime_put_noidle(cdd->ddev.dev);
+
 		return;
+	}
 
 	WARN_ON(!list_empty(&cdd->pending));
 
@@ -460,9 +473,9 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
 	struct cppi41_dd *cdd = c->cdd;
 	int error;
 
-	/* PM runtime paired with dmaengine_desc_get_callback_invoke */
 	error = pm_runtime_get(cdd->ddev.dev);
 	if ((error != -EINPROGRESS) && error < 0) {
+		pm_runtime_put_noidle(cdd->ddev.dev);
 		dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n",
 			error);
 
@@ -473,6 +486,9 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
 		push_desc_queue(c);
 	else
 		pending_desc(c);
+
+	pm_runtime_mark_last_busy(cdd->ddev.dev);
+	pm_runtime_put_autosuspend(cdd->ddev.dev);
 }
 
 static u32 get_host_pd0(u32 length)
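The rework pairs every pm_runtime get with a put in the same function: issue_pending drops its reference once the descriptor is queued, the IRQ handler takes a short reference per completed transfer, and a failed pm_runtime_get_sync() is balanced with pm_runtime_put_noidle(), since get_sync bumps the usage count even on failure. The general shape, as a sketch:

	#include <linux/pm_runtime.h>

	static void example_do_io(struct device *dev)
	{
		int error = pm_runtime_get_sync(dev);

		if (error < 0) {
			/* the usage count was still incremented: balance it */
			pm_runtime_put_noidle(dev);
			return;
		}

		/* ... touch the hardware ... */

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
	}
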
@@ -1059,8 +1075,8 @@ static int cppi41_dma_probe(struct platform_device *pdev)
 	deinit_cppi41(dev, cdd);
 err_init_cppi:
 	pm_runtime_dont_use_autosuspend(dev);
-	pm_runtime_put_sync(dev);
 err_get_sync:
+	pm_runtime_put_sync(dev);
 	pm_runtime_disable(dev);
 	iounmap(cdd->usbss_mem);
 	iounmap(cdd->ctrl_mem);
@@ -1072,7 +1088,12 @@ static int cppi41_dma_probe(struct platform_device *pdev)
 static int cppi41_dma_remove(struct platform_device *pdev)
 {
 	struct cppi41_dd *cdd = platform_get_drvdata(pdev);
+	int error;
 
+	error = pm_runtime_get_sync(&pdev->dev);
+	if (error < 0)
+		dev_err(&pdev->dev, "%s could not pm_runtime_get: %i\n",
+			__func__, error);
 	of_dma_controller_free(pdev->dev.of_node);
 	dma_async_device_unregister(&cdd->ddev);
 
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index e18a580..77242b3 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1628,6 +1628,7 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
 	if (echan->slot[0] < 0) {
 		dev_err(dev, "Entry slot allocation failed for channel %u\n",
 			EDMA_CHAN_SLOT(echan->ch_num));
+		ret = echan->slot[0];
 		goto err_slot;
 	}
 
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 8346199..a235878 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -578,7 +578,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
 
 	burst = convert_burst(8);
 	width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES);
-	v_lli->cfg |= DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
+	v_lli->cfg = DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
 		DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
 		DMA_CHAN_CFG_DST_LINEAR_MODE |
 		DMA_CHAN_CFG_SRC_LINEAR_MODE |
diff --git a/drivers/edac/qcom_llcc_edac.c b/drivers/edac/qcom_llcc_edac.c
index 71f74ad..18b2da7 100644
--- a/drivers/edac/qcom_llcc_edac.c
+++ b/drivers/edac/qcom_llcc_edac.c
@@ -18,6 +18,7 @@
 #include <linux/spinlock.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
+#include <linux/interrupt.h>
 #include "edac_core.h"
 
 #ifdef CONFIG_EDAC_QCOM_LLCC_PANIC_ON_CE
@@ -76,6 +77,11 @@
 static int poll_msec = 5000;
 module_param(poll_msec, int, 0444);
 
+static int interrupt_mode;
+module_param(interrupt_mode, int, 0444);
+MODULE_PARM_DESC(interrupt_mode,
+		 "Controls whether to use interrupt or poll mode");
+
 enum {
 	LLCC_DRAM_CE = 0,
 	LLCC_DRAM_UE,
@@ -91,6 +97,7 @@ struct errors_edac {
 
 struct erp_drvdata {
 	struct regmap *llcc_map;
+	u32 ecc_irq;
 };
 
 static const struct errors_edac errors[] = {
@@ -262,7 +269,7 @@ static void dump_syn_reg(struct edac_device_ctl_info *edev_ctl,
 	errors[err_type].func(edev_ctl, 0, 0, errors[err_type].msg);
 }
 
-static void qcom_llcc_poll_cache_errors
+static void qcom_llcc_check_cache_errors
 		(struct edac_device_ctl_info *edev_ctl)
 {
 	u32 drp_error;
@@ -295,6 +302,18 @@ static void qcom_llcc_poll_cache_errors
 	}
 }
 
+static void qcom_llcc_poll_cache_errors(struct edac_device_ctl_info *edev_ctl)
+{
+	qcom_llcc_check_cache_errors(edev_ctl);
+}
+
+static irqreturn_t llcc_ecc_irq_handler
+			(int irq, void *edev_ctl)
+{
+	qcom_llcc_check_cache_errors(edev_ctl);
+	return IRQ_HANDLED;
+}
+
 static int qcom_llcc_erp_probe(struct platform_device *pdev)
 {
 	int rc = 0;
@@ -321,12 +340,29 @@ static int qcom_llcc_erp_probe(struct platform_device *pdev)
 	drv->llcc_map = syscon_node_to_regmap(dev->parent->of_node);
 	if (IS_ERR(drv->llcc_map)) {
 		dev_err(dev, "no regmap for syscon llcc parent\n");
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	if (interrupt_mode) {
+		drv->ecc_irq = platform_get_irq_byname(pdev, "ecc_irq");
+		if (!drv->ecc_irq) {
+			rc = -ENODEV;
+			goto out;
+		}
+
+		rc = devm_request_irq(dev, drv->ecc_irq, llcc_ecc_irq_handler,
+				IRQF_TRIGGER_RISING, "llcc_ecc", edev_ctl);
+		if (rc) {
+			dev_err(dev, "failed to request ecc irq\n");
+			goto out;
+		}
 	}
 
 	platform_set_drvdata(pdev, edev_ctl);
 
 	rc = edac_device_add_device(edev_ctl);
+out:
 	if (rc)
 		edac_device_free_ctl_info(edev_ctl);
 
diff --git a/drivers/esoc/Kconfig b/drivers/esoc/Kconfig
new file mode 100644
index 0000000..0efca1e
--- /dev/null
+++ b/drivers/esoc/Kconfig
@@ -0,0 +1,64 @@
+#
+# External soc control infrastructure and drivers
+#
+menuconfig ESOC
+	bool "External SOCs Control"
+	help
+	  External SOCs can be powered on and monitored by user
+	  space or kernel drivers. They can also be sent control
+	  commands. This framework provides an interface to track
+	  events related to such external slave socs.
+
+if ESOC
+
+config ESOC_DEV
+	bool "ESOC userspace interface"
+	help
+	  Say yes here to enable a userspace representation of the control
+	  link. Userspace can register a request engine or a command engine
+	  for the external soc. It can receive event notifications from the
+	  control link.
+
+config ESOC_CLIENT
+	bool "ESOC client interface"
+	depends on OF
+	help
+	  Say yes here to enable client interface for external socs.
+	  Clients can specify the external soc that they are interested in
+	  by using device tree phandles. Based on this, clients can register
+	  for notifications from a specific soc.
+
+config ESOC_DEBUG
+	bool "ESOC debug support"
+	help
+	  Say yes here to enable debugging support in the ESOC framework
+	  and individual esoc drivers.
+	  Enabling this option defines the DEBUG macro for the esoc
+	  drivers, which allows verbose logging of esoc driver traces.
+
+config ESOC_MDM_4x
+	bool "Add support for external mdm9x25/mdm9x35/mdm9x45/mdm9x55"
+	help
+	  In some Qualcomm Technologies, Inc. boards, an external modem such as
+	  mdm9x25 or mdm9x35 is connected to a primary msm. The primary soc can
+	  control/monitor the modem via gpios. The data communication with such
+	  modems can occur over PCIE or HSIC.
+
+config ESOC_MDM_DRV
+	tristate "Command engine for 4x series external modems"
+	help
+	  Provides a command engine to control the behavior of an external modem
+	  such as mdm9x25/mdm9x35/mdm9x45/mdm9x55/QSC. Allows the primary soc to put the
+	  external modem in a specific mode. Also listens for events on the
+	  external modem.
+
+config ESOC_MDM_DBG_ENG
+	tristate "debug engine for 4x series external modems"
+	depends on ESOC_MDM_DRV
+	help
+	  Provides a user interface to mask out certain commands sent
+	  by command engine to the external modem. Also allows masking
+	  of certain notifications being sent to the external modem.
+
+endif
diff --git a/drivers/esoc/Makefile b/drivers/esoc/Makefile
new file mode 100644
index 0000000..76137ea
--- /dev/null
+++ b/drivers/esoc/Makefile
@@ -0,0 +1,9 @@
+# generic  external soc control support
+
+ccflags-$(CONFIG_ESOC_DEBUG)	:= -DDEBUG
+obj-$(CONFIG_ESOC)	+= esoc_bus.o
+obj-$(CONFIG_ESOC_DEV)	+= esoc_dev.o
+obj-$(CONFIG_ESOC_CLIENT)	+= esoc_client.o
+obj-$(CONFIG_ESOC_MDM_4x)	+= esoc-mdm-pon.o esoc-mdm-4x.o
+obj-$(CONFIG_ESOC_MDM_DRV)	+= esoc-mdm-drv.o
+obj-$(CONFIG_ESOC_MDM_DBG_ENG)	+= esoc-mdm-dbg-eng.o
diff --git a/drivers/esoc/esoc-mdm-4x.c b/drivers/esoc/esoc-mdm-4x.c
new file mode 100644
index 0000000..b1834e2
--- /dev/null
+++ b/drivers/esoc/esoc-mdm-4x.c
@@ -0,0 +1,1033 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/coresight.h>
+#include <linux/coresight-cti.h>
+#include <linux/workqueue.h>
+#include <soc/qcom/sysmon.h>
+#include "esoc-mdm.h"
+
+enum gpio_update_config {
+	GPIO_UPDATE_BOOTING_CONFIG = 1,
+	GPIO_UPDATE_RUNNING_CONFIG,
+};
+
+enum irq_mask {
+	IRQ_ERRFATAL = 0x1,
+	IRQ_STATUS = 0x2,
+	IRQ_PBLRDY = 0x4,
+};
+
+static struct gpio_map {
+	const char *name;
+	int index;
+} gpio_map[] = {
+	{"qcom,mdm2ap-errfatal-gpio",   MDM2AP_ERRFATAL},
+	{"qcom,ap2mdm-errfatal-gpio",   AP2MDM_ERRFATAL},
+	{"qcom,mdm2ap-status-gpio",     MDM2AP_STATUS},
+	{"qcom,ap2mdm-status-gpio",     AP2MDM_STATUS},
+	{"qcom,mdm2ap-pblrdy-gpio",     MDM2AP_PBLRDY},
+	{"qcom,ap2mdm-wakeup-gpio",     AP2MDM_WAKEUP},
+	{"qcom,ap2mdm-chnlrdy-gpio",    AP2MDM_CHNLRDY},
+	{"qcom,mdm2ap-wakeup-gpio",     MDM2AP_WAKEUP},
+	{"qcom,ap2mdm-vddmin-gpio",     AP2MDM_VDDMIN},
+	{"qcom,mdm2ap-vddmin-gpio",     MDM2AP_VDDMIN},
+	{"qcom,ap2mdm-pmic-pwr-en-gpio", AP2MDM_PMIC_PWR_EN},
+	{"qcom,mdm-link-detect-gpio", MDM_LINK_DETECT},
+};
+
+/* Required gpios */
+static const int required_gpios[] = {
+	MDM2AP_ERRFATAL,
+	AP2MDM_ERRFATAL,
+	MDM2AP_STATUS,
+	AP2MDM_STATUS,
+};
+
+static void mdm_debug_gpio_show(struct mdm_ctrl *mdm)
+{
+	struct device *dev = mdm->dev;
+
+	dev_dbg(dev, "%s: MDM2AP_ERRFATAL gpio = %d\n",
+			__func__, MDM_GPIO(mdm, MDM2AP_ERRFATAL));
+	dev_dbg(dev, "%s: AP2MDM_ERRFATAL gpio = %d\n",
+			__func__, MDM_GPIO(mdm, AP2MDM_ERRFATAL));
+	dev_dbg(dev, "%s: MDM2AP_STATUS gpio = %d\n",
+			__func__, MDM_GPIO(mdm, MDM2AP_STATUS));
+	dev_dbg(dev, "%s: AP2MDM_STATUS gpio = %d\n",
+			__func__, MDM_GPIO(mdm, AP2MDM_STATUS));
+	dev_dbg(dev, "%s: AP2MDM_SOFT_RESET gpio = %d\n",
+			__func__, MDM_GPIO(mdm, AP2MDM_SOFT_RESET));
+	dev_dbg(dev, "%s: MDM2AP_WAKEUP gpio = %d\n",
+			__func__, MDM_GPIO(mdm, MDM2AP_WAKEUP));
+	dev_dbg(dev, "%s: AP2MDM_WAKEUP gpio = %d\n",
+			 __func__, MDM_GPIO(mdm, AP2MDM_WAKEUP));
+	dev_dbg(dev, "%s: AP2MDM_PMIC_PWR_EN gpio = %d\n",
+			 __func__, MDM_GPIO(mdm, AP2MDM_PMIC_PWR_EN));
+	dev_dbg(dev, "%s: MDM2AP_PBLRDY gpio = %d\n",
+			 __func__, MDM_GPIO(mdm, MDM2AP_PBLRDY));
+	dev_dbg(dev, "%s: AP2MDM_VDDMIN gpio = %d\n",
+			 __func__, MDM_GPIO(mdm, AP2MDM_VDDMIN));
+	dev_dbg(dev, "%s: MDM2AP_VDDMIN gpio = %d\n",
+			 __func__, MDM_GPIO(mdm, MDM2AP_VDDMIN));
+}
+
+static void mdm_enable_irqs(struct mdm_ctrl *mdm)
+{
+	if (!mdm)
+		return;
+	if (mdm->irq_mask & IRQ_ERRFATAL) {
+		enable_irq(mdm->errfatal_irq);
+		irq_set_irq_wake(mdm->errfatal_irq, 1);
+		mdm->irq_mask &= ~IRQ_ERRFATAL;
+	}
+	if (mdm->irq_mask & IRQ_STATUS) {
+		enable_irq(mdm->status_irq);
+		irq_set_irq_wake(mdm->status_irq, 1);
+		mdm->irq_mask &= ~IRQ_STATUS;
+	}
+	if (mdm->irq_mask & IRQ_PBLRDY) {
+		enable_irq(mdm->pblrdy_irq);
+		mdm->irq_mask &= ~IRQ_PBLRDY;
+	}
+}
+
+static void mdm_disable_irqs(struct mdm_ctrl *mdm)
+{
+	if (!mdm)
+		return;
+	if (!(mdm->irq_mask & IRQ_ERRFATAL)) {
+		irq_set_irq_wake(mdm->errfatal_irq, 0);
+		disable_irq_nosync(mdm->errfatal_irq);
+		mdm->irq_mask |= IRQ_ERRFATAL;
+	}
+	if (!(mdm->irq_mask & IRQ_STATUS)) {
+		irq_set_irq_wake(mdm->status_irq, 0);
+		disable_irq_nosync(mdm->status_irq);
+		mdm->irq_mask |= IRQ_STATUS;
+	}
+	if (!(mdm->irq_mask & IRQ_PBLRDY)) {
+		disable_irq_nosync(mdm->pblrdy_irq);
+		mdm->irq_mask |= IRQ_PBLRDY;
+	}
+}
+
+static void mdm_deconfigure_ipc(struct mdm_ctrl *mdm)
+{
+	int i;
+
+	for (i = 0; i < NUM_GPIOS; ++i) {
+		if (gpio_is_valid(MDM_GPIO(mdm, i)))
+			gpio_free(MDM_GPIO(mdm, i));
+	}
+	if (mdm->mdm_queue) {
+		destroy_workqueue(mdm->mdm_queue);
+		mdm->mdm_queue = NULL;
+	}
+}
+
+static void mdm_update_gpio_configs(struct mdm_ctrl *mdm,
+				enum gpio_update_config gpio_config)
+{
+	struct pinctrl_state *pins_state = NULL;
+	/* Some gpio configuration may need updating after modem bootup. */
+	switch (gpio_config) {
+	case GPIO_UPDATE_RUNNING_CONFIG:
+		pins_state = mdm->gpio_state_running;
+		break;
+	case GPIO_UPDATE_BOOTING_CONFIG:
+		pins_state = mdm->gpio_state_booting;
+		break;
+	default:
+		pins_state = NULL;
+		dev_err(mdm->dev, "%s: called with no config\n", __func__);
+		break;
+	}
+	if (pins_state != NULL) {
+		if (pinctrl_select_state(mdm->pinctrl, pins_state))
+			dev_err(mdm->dev, "switching gpio config failed\n");
+	}
+}
+
+static void mdm_trigger_dbg(struct mdm_ctrl *mdm)
+{
+	int ret;
+
+	if (mdm->dbg_mode && !mdm->trig_cnt) {
+		ret = coresight_cti_pulse_trig(mdm->cti, MDM_CTI_CH);
+		mdm->trig_cnt++;
+		if (ret)
+			dev_err(mdm->dev, "unable to trigger cti pulse on\n");
+	}
+}
+
+static int mdm_cmd_exe(enum esoc_cmd cmd, struct esoc_clink *esoc)
+{
+	unsigned long end_time;
+	bool status_down = false;
+	struct mdm_ctrl *mdm = get_esoc_clink_data(esoc);
+	struct device *dev = mdm->dev;
+	int ret;
+	bool graceful_shutdown = false;
+
+	switch (cmd) {
+	case ESOC_PWR_ON:
+		gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 0);
+		mdm_enable_irqs(mdm);
+		mdm->init = 1;
+		mdm_do_first_power_on(mdm);
+		break;
+	case ESOC_PWR_OFF:
+		mdm_disable_irqs(mdm);
+		mdm->debug = 0;
+		mdm->ready = false;
+		mdm->trig_cnt = 0;
+		graceful_shutdown = true;
+		ret = sysmon_send_shutdown(&esoc->subsys);
+		if (ret) {
+			dev_err(mdm->dev, "sysmon shutdown fail, ret = %d\n",
+									ret);
+			graceful_shutdown = false;
+			goto force_poff;
+		}
+		dev_dbg(mdm->dev, "Waiting for status gpio go low\n");
+		status_down = false;
+		end_time = jiffies + msecs_to_jiffies(10000);
+		while (time_before(jiffies, end_time)) {
+			if (gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS))
+									== 0) {
+				dev_dbg(dev, "Status went low\n");
+				status_down = true;
+				break;
+			}
+			msleep(100);
+		}
+		if (status_down)
+			dev_dbg(dev, "shutdown successful\n");
+		else
+			dev_err(mdm->dev, "graceful poff ipc fail\n");
+		break;
+force_poff:
+	case ESOC_FORCE_PWR_OFF:
+		if (!graceful_shutdown) {
+			mdm_disable_irqs(mdm);
+			mdm->debug = 0;
+			mdm->ready = false;
+			mdm->trig_cnt = 0;
+
+			dev_err(mdm->dev, "Graceful shutdown fail, ret = %d\n",
+				esoc->subsys.sysmon_shutdown_ret);
+		}
+
+		/*
+		 * Force a shutdown of the mdm. This is required in order
+		 * to prevent the mdm from immediately powering back on
+		 * after the shutdown
+		 */
+		gpio_set_value(MDM_GPIO(mdm, AP2MDM_STATUS), 0);
+		esoc_clink_queue_request(ESOC_REQ_SHUTDOWN, esoc);
+		mdm_power_down(mdm);
+		mdm_update_gpio_configs(mdm, GPIO_UPDATE_BOOTING_CONFIG);
+		break;
+	case ESOC_RESET:
+		mdm_toggle_soft_reset(mdm, false);
+		break;
+	case ESOC_PREPARE_DEBUG:
+		/*
+		 * Disable all irqs except the request irq (pblrdy);
+		 * force a reset of the mdm by signaling an APQ crash;
+		 * wait till the mdm is ready for ramdumps.
+		 */
+		mdm->ready = false;
+		cancel_delayed_work(&mdm->mdm2ap_status_check_work);
+		gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1);
+		dev_dbg(mdm->dev, "set ap2mdm errfatal to force reset\n");
+		msleep(mdm->ramdump_delay_ms);
+		break;
+	case ESOC_EXE_DEBUG:
+		mdm->debug = 1;
+		mdm->trig_cnt = 0;
+		mdm_toggle_soft_reset(mdm, false);
+		/*
+		 * wait for ramdumps to be collected
+		 * then power down the mdm and switch gpios to booting
+		 * config
+		 */
+		wait_for_completion(&mdm->debug_done);
+		if (mdm->debug_fail) {
+			dev_err(mdm->dev, "unable to collect ramdumps\n");
+			mdm->debug = 0;
+			return -EIO;
+		}
+		dev_dbg(mdm->dev, "ramdump collection done\n");
+		mdm->debug = 0;
+		init_completion(&mdm->debug_done);
+		break;
+	case ESOC_EXIT_DEBUG:
+		/*
+		 * Deassert APQ to mdm err fatal
+		 * Power on the mdm
+		 */
+		gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 0);
+		dev_dbg(mdm->dev, "exiting debug state after power on\n");
+		mdm->get_restart_reason = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
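Note the force_poff: label sitting just above case ESOC_FORCE_PWR_OFF: a failed graceful shutdown jumps from the middle of ESOC_PWR_OFF into the forced path, so both share the same power-down tail. The construct is legal C, since a label may precede a case label; a minimal standalone illustration:

	#include <stdio.h>

	static void example_shutdown(int cmd, int graceful_failed)
	{
		switch (cmd) {
		case 1:			/* graceful */
			puts("ask nicely");
			if (graceful_failed)
				goto force;
			break;
	force:
		case 2:			/* forced */
			puts("pull the plug");
			break;
		}
	}

	int main(void)
	{
		example_shutdown(1, 1);	/* prints both lines */
		return 0;
	}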
+
+static void mdm2ap_status_check(struct work_struct *work)
+{
+	struct mdm_ctrl *mdm =
+		container_of(work, struct mdm_ctrl,
+					 mdm2ap_status_check_work.work);
+	struct device *dev = mdm->dev;
+	struct esoc_clink *esoc = mdm->esoc;
+
+	if (gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS)) == 0) {
+		dev_dbg(dev, "MDM2AP_STATUS did not go high\n");
+		esoc_clink_evt_notify(ESOC_UNEXPECTED_RESET, esoc);
+	}
+}
+
+static void mdm_status_fn(struct work_struct *work)
+{
+	struct mdm_ctrl *mdm =
+		container_of(work, struct mdm_ctrl, mdm_status_work);
+	struct device *dev = mdm->dev;
+	int value = gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS));
+
+	dev_dbg(dev, "%s: status:%d\n", __func__, value);
+	/* Update gpio configuration to "running" config. */
+	mdm_update_gpio_configs(mdm, GPIO_UPDATE_RUNNING_CONFIG);
+}
+
+static void mdm_get_restart_reason(struct work_struct *work)
+{
+	int ret, ntries = 0;
+	char sfr_buf[RD_BUF_SIZE];
+	struct mdm_ctrl *mdm =
+		container_of(work, struct mdm_ctrl, restart_reason_work);
+	struct device *dev = mdm->dev;
+
+	do {
+		ret = sysmon_get_reason(&mdm->esoc->subsys, sfr_buf,
+							sizeof(sfr_buf));
+		if (!ret) {
+			dev_err(dev, "mdm restart reason is %s\n", sfr_buf);
+			break;
+		}
+		msleep(SFR_RETRY_INTERVAL);
+	} while (++ntries < SFR_MAX_RETRIES);
+	if (ntries == SFR_MAX_RETRIES)
+		dev_dbg(dev, "%s: Error retrieving restart reason: %d\n",
+						__func__, ret);
+	mdm->get_restart_reason = false;
+}
+
+static void mdm_notify(enum esoc_notify notify, struct esoc_clink *esoc)
+{
+	bool status_down;
+	uint64_t timeout;
+	uint64_t now;
+	struct mdm_ctrl *mdm = get_esoc_clink_data(esoc);
+	struct device *dev = mdm->dev;
+
+	switch (notify) {
+	case ESOC_IMG_XFER_DONE:
+		if (gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS)) ==  0)
+			schedule_delayed_work(&mdm->mdm2ap_status_check_work,
+				msecs_to_jiffies(MDM2AP_STATUS_TIMEOUT_MS));
+		break;
+	case ESOC_BOOT_DONE:
+		esoc_clink_evt_notify(ESOC_RUN_STATE, esoc);
+		break;
+	case ESOC_IMG_XFER_RETRY:
+		mdm->init = 1;
+		mdm_toggle_soft_reset(mdm, false);
+		break;
+	case ESOC_IMG_XFER_FAIL:
+		esoc_clink_evt_notify(ESOC_INVALID_STATE, esoc);
+		break;
+	case ESOC_BOOT_FAIL:
+		esoc_clink_evt_notify(ESOC_INVALID_STATE, esoc);
+		break;
+	case ESOC_UPGRADE_AVAILABLE:
+		break;
+	case ESOC_DEBUG_DONE:
+		mdm->debug_fail = false;
+		mdm_update_gpio_configs(mdm, GPIO_UPDATE_BOOTING_CONFIG);
+		complete(&mdm->debug_done);
+		break;
+	case ESOC_DEBUG_FAIL:
+		mdm->debug_fail = true;
+		complete(&mdm->debug_done);
+		break;
+	case ESOC_PRIMARY_CRASH:
+		mdm_disable_irqs(mdm);
+		status_down = false;
+		dev_dbg(dev, "signal apq err fatal for graceful restart\n");
+		gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1);
+		timeout = local_clock();
+		do_div(timeout, NSEC_PER_MSEC);
+		timeout += MDM_MODEM_TIMEOUT;
+		do {
+			if (gpio_get_value(MDM_GPIO(mdm,
+						MDM2AP_STATUS)) == 0) {
+				status_down = true;
+				break;
+			}
+			now = local_clock();
+			do_div(now, NSEC_PER_MSEC);
+		} while (!time_after64(now, timeout));
+
+		if (!status_down) {
+			dev_err(mdm->dev, "%s MDM2AP status did not go low\n",
+								__func__);
+			mdm_toggle_soft_reset(mdm, true);
+		}
+		break;
+	case ESOC_PRIMARY_REBOOT:
+		mdm_disable_irqs(mdm);
+		mdm->debug = 0;
+		mdm->ready = false;
+		mdm_cold_reset(mdm);
+		break;
+	}
+}
+
+static irqreturn_t mdm_errfatal(int irq, void *dev_id)
+{
+	struct mdm_ctrl *mdm = (struct mdm_ctrl *)dev_id;
+	struct esoc_clink *esoc;
+	struct device *dev;
+
+	if (!mdm)
+		goto no_mdm_irq;
+	dev = mdm->dev;
+	if (!mdm->ready)
+		goto mdm_pwroff_irq;
+	esoc = mdm->esoc;
+	dev_err(dev, "%s: mdm sent errfatal interrupt\n",
+					 __func__);
+	/* disable irq? */
+	esoc_clink_evt_notify(ESOC_ERR_FATAL, esoc);
+	return IRQ_HANDLED;
+mdm_pwroff_irq:
+	dev_info(dev, "errfatal irq when in pwroff\n");
+no_mdm_irq:
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t mdm_status_change(int irq, void *dev_id)
+{
+	int value;
+	struct esoc_clink *esoc;
+	struct mdm_ctrl *mdm = (struct mdm_ctrl *)dev_id;
+	struct device *dev = mdm->dev;
+
+	if (!mdm)
+		return IRQ_HANDLED;
+	esoc = mdm->esoc;
+	value = gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS));
+	if (value == 0 && mdm->ready) {
+		dev_err(dev, "unexpected reset external modem\n");
+		esoc_clink_evt_notify(ESOC_UNEXPECTED_RESET, esoc);
+	} else if (value == 1) {
+		cancel_delayed_work(&mdm->mdm2ap_status_check_work);
+		dev_dbg(dev, "status = 1: mdm is now ready\n");
+		mdm->ready = true;
+		mdm_trigger_dbg(mdm);
+		queue_work(mdm->mdm_queue, &mdm->mdm_status_work);
+		if (mdm->get_restart_reason)
+			queue_work(mdm->mdm_queue, &mdm->restart_reason_work);
+	}
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t mdm_pblrdy_change(int irq, void *dev_id)
+{
+	struct mdm_ctrl *mdm;
+	struct device *dev;
+	struct esoc_clink *esoc;
+
+	mdm = (struct mdm_ctrl *)dev_id;
+	if (!mdm)
+		return IRQ_HANDLED;
+	esoc = mdm->esoc;
+	dev = mdm->dev;
+	dev_dbg(dev, "pbl ready %d:\n",
+			gpio_get_value(MDM_GPIO(mdm, MDM2AP_PBLRDY)));
+	if (mdm->init) {
+		mdm->init = 0;
+		mdm_trigger_dbg(mdm);
+		esoc_clink_queue_request(ESOC_REQ_IMG, esoc);
+		return IRQ_HANDLED;
+	}
+	if (mdm->debug)
+		esoc_clink_queue_request(ESOC_REQ_DEBUG, esoc);
+	return IRQ_HANDLED;
+}
+
+static int mdm_get_status(u32 *status, struct esoc_clink *esoc)
+{
+	struct mdm_ctrl *mdm = get_esoc_clink_data(esoc);
+
+	if (gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS)) == 0)
+		*status = 0;
+	else
+		*status = 1;
+	return 0;
+}
+
+static void mdm_configure_debug(struct mdm_ctrl *mdm)
+{
+	void __iomem *addr;
+	unsigned int val;
+	int ret;
+	struct device_node *node = mdm->dev->of_node;
+
+	addr = of_iomap(node, 0);
+	if (IS_ERR(addr)) {
+		dev_err(mdm->dev, "failed to get debug base address\n");
+		return;
+	}
+	mdm->dbg_addr = addr + MDM_DBG_OFFSET;
+	val = readl_relaxed(mdm->dbg_addr);
+	if (val == MDM_DBG_MODE) {
+		mdm->dbg_mode = true;
+		mdm->cti = coresight_cti_get(MDM_CTI_NAME);
+		if (IS_ERR(mdm->cti)) {
+			dev_err(mdm->dev, "unable to get cti handle\n");
+			goto cti_get_err;
+		}
+		ret = coresight_cti_map_trigout(mdm->cti, MDM_CTI_TRIG,
+								MDM_CTI_CH);
+		if (ret) {
+			dev_err(mdm->dev, "unable to map trig to channel\n");
+			goto cti_map_err;
+		}
+		mdm->trig_cnt = 0;
+	} else {
+		dev_dbg(mdm->dev, "Not in debug mode. debug mode = %u\n", val);
+		mdm->dbg_mode = false;
+	}
+	return;
+cti_map_err:
+	coresight_cti_put(mdm->cti);
+cti_get_err:
+	mdm->dbg_mode = false;
+}
+
+/* Fail if any of the required gpios is absent. */
+static int mdm_dt_parse_gpios(struct mdm_ctrl *mdm)
+{
+	int i, val, rc = 0;
+	struct device_node *node = mdm->dev->of_node;
+
+	for (i = 0; i < NUM_GPIOS; i++)
+		mdm->gpios[i] = INVALID_GPIO;
+
+	for (i = 0; i < ARRAY_SIZE(gpio_map); i++) {
+		val = of_get_named_gpio(node, gpio_map[i].name, 0);
+		if (val >= 0)
+			MDM_GPIO(mdm, gpio_map[i].index) = val;
+	}
+	/* These two are special because they can be inverted. */
+	/* Verify that the required gpios have valid values */
+	for (i = 0; i < ARRAY_SIZE(required_gpios); i++) {
+		if (MDM_GPIO(mdm, required_gpios[i]) == INVALID_GPIO) {
+			rc = -ENXIO;
+			break;
+		}
+	}
+	mdm_debug_gpio_show(mdm);
+	return rc;
+}
+
+static int mdm_configure_ipc(struct mdm_ctrl *mdm, struct platform_device *pdev)
+{
+	int ret;
+	int irq;
+	struct device *dev = mdm->dev;
+	struct device_node *node = pdev->dev.of_node;
+
+	ret = of_property_read_u32(node, "qcom,ramdump-timeout-ms",
+						&mdm->dump_timeout_ms);
+	if (ret)
+		mdm->dump_timeout_ms = DEF_RAMDUMP_TIMEOUT;
+	ret = of_property_read_u32(node, "qcom,ramdump-delay-ms",
+						&mdm->ramdump_delay_ms);
+	if (ret)
+		mdm->ramdump_delay_ms = DEF_RAMDUMP_DELAY;
+	/* Multiple gpio_request calls are allowed */
+	if (gpio_request(MDM_GPIO(mdm, AP2MDM_STATUS), "AP2MDM_STATUS"))
+		dev_err(dev, "Failed to configure AP2MDM_STATUS gpio\n");
+	/* Multiple gpio_request calls are allowed */
+	if (gpio_request(MDM_GPIO(mdm, AP2MDM_ERRFATAL), "AP2MDM_ERRFATAL"))
+		dev_err(dev, "%s Failed to configure AP2MDM_ERRFATAL gpio\n",
+			   __func__);
+	if (gpio_request(MDM_GPIO(mdm, MDM2AP_STATUS), "MDM2AP_STATUS")) {
+		dev_err(dev, "%s Failed to configure MDM2AP_STATUS gpio\n",
+			   __func__);
+		goto fatal_err;
+	}
+	if (gpio_request(MDM_GPIO(mdm, MDM2AP_ERRFATAL), "MDM2AP_ERRFATAL")) {
+		dev_err(dev, "%s Failed to configure MDM2AP_ERRFATAL gpio\n",
+			   __func__);
+		goto fatal_err;
+	}
+	if (gpio_is_valid(MDM_GPIO(mdm, MDM2AP_PBLRDY))) {
+		if (gpio_request(MDM_GPIO(mdm, MDM2AP_PBLRDY),
+						"MDM2AP_PBLRDY")) {
+			dev_err(dev, "Cannot configure MDM2AP_PBLRDY gpio\n");
+			goto fatal_err;
+		}
+	}
+	if (gpio_is_valid(MDM_GPIO(mdm, AP2MDM_WAKEUP))) {
+		if (gpio_request(MDM_GPIO(mdm, AP2MDM_WAKEUP),
+					"AP2MDM_WAKEUP")) {
+			dev_err(dev, "Cannot configure AP2MDM_WAKEUP gpio\n");
+			goto fatal_err;
+		}
+	}
+	if (gpio_is_valid(MDM_GPIO(mdm, AP2MDM_CHNLRDY))) {
+		if (gpio_request(MDM_GPIO(mdm, AP2MDM_CHNLRDY),
+						"AP2MDM_CHNLRDY")) {
+			dev_err(dev, "Cannot configure AP2MDM_CHNLRDY gpio\n");
+			goto fatal_err;
+		}
+	}
+
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_STATUS), 0);
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 0);
+
+	if (gpio_is_valid(MDM_GPIO(mdm, AP2MDM_CHNLRDY)))
+		gpio_direction_output(MDM_GPIO(mdm, AP2MDM_CHNLRDY), 0);
+
+	gpio_direction_input(MDM_GPIO(mdm, MDM2AP_STATUS));
+	gpio_direction_input(MDM_GPIO(mdm, MDM2AP_ERRFATAL));
+
+	/* ERR_FATAL irq. */
+	irq = gpio_to_irq(MDM_GPIO(mdm, MDM2AP_ERRFATAL));
+	if (irq < 0) {
+		dev_err(dev, "bad MDM2AP_ERRFATAL IRQ resource\n");
+		goto errfatal_err;
+	}
+	ret = request_irq(irq, mdm_errfatal,
+			IRQF_TRIGGER_RISING, "mdm errfatal", mdm);
+
+	if (ret < 0) {
+		dev_err(dev, "%s: MDM2AP_ERRFATAL IRQ#%d request failed,\n",
+					__func__, irq);
+		goto errfatal_err;
+	}
+	mdm->errfatal_irq = irq;
+
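+	/*
+	 * From here on, a failure to set up an IRQ is treated as
+	 * non-fatal: each error path falls through to the next label so
+	 * the remaining IRQs are still configured and the function
+	 * returns 0.
+	 */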
+errfatal_err:
+	/* status irq */
+	irq = gpio_to_irq(MDM_GPIO(mdm, MDM2AP_STATUS));
+	if (irq < 0) {
+		dev_err(dev, "%s: bad MDM2AP_STATUS IRQ resource, err = %d\n",
+				__func__, irq);
+		goto status_err;
+	}
+	ret = request_threaded_irq(irq, NULL, mdm_status_change,
+		IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+		"mdm status", mdm);
+	if (ret < 0) {
+		dev_err(dev, "%s: MDM2AP_STATUS IRQ#%d request failed, err=%d",
+			 __func__, irq, ret);
+		goto status_err;
+	}
+	mdm->status_irq = irq;
+status_err:
+	if (gpio_is_valid(MDM_GPIO(mdm, MDM2AP_PBLRDY))) {
+		irq = platform_get_irq_byname(pdev, "plbrdy_irq");
+		if (irq < 0) {
+			dev_err(dev, "%s: bad MDM2AP_PBLRDY IRQ resource\n",
+				 __func__);
+			goto pblrdy_err;
+		}
+
+		ret = request_threaded_irq(irq, NULL, mdm_pblrdy_change,
+				IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+				"mdm pbl ready", mdm);
+		if (ret < 0) {
+			dev_err(dev, "MDM2AP_PBL IRQ#%d request failed %d\n",
+								irq, ret);
+			goto pblrdy_err;
+		}
+		mdm->pblrdy_irq = irq;
+	}
+	mdm_disable_irqs(mdm);
+pblrdy_err:
+	return 0;
+fatal_err:
+	mdm_deconfigure_ipc(mdm);
+	return ret;
+}
+
+static int mdm_pinctrl_init(struct mdm_ctrl *mdm)
+{
+	int retval = 0;
+
+	mdm->pinctrl = devm_pinctrl_get(mdm->dev);
+	if (IS_ERR_OR_NULL(mdm->pinctrl)) {
+		retval = PTR_ERR(mdm->pinctrl);
+		goto err_state_suspend;
+	}
+	mdm->gpio_state_booting =
+		pinctrl_lookup_state(mdm->pinctrl,
+				"mdm_booting");
+	if (IS_ERR_OR_NULL(mdm->gpio_state_booting)) {
+		mdm->gpio_state_running = NULL;
+		mdm->gpio_state_booting = NULL;
+	} else {
+		mdm->gpio_state_running =
+			pinctrl_lookup_state(mdm->pinctrl,
+				"mdm_running");
+		if (IS_ERR_OR_NULL(mdm->gpio_state_running)) {
+			mdm->gpio_state_booting = NULL;
+			mdm->gpio_state_running = NULL;
+		}
+	}
+	mdm->gpio_state_active =
+		pinctrl_lookup_state(mdm->pinctrl,
+				"mdm_active");
+	if (IS_ERR_OR_NULL(mdm->gpio_state_active)) {
+		retval = PTR_ERR(mdm->gpio_state_active);
+		goto err_state_active;
+	}
+	mdm->gpio_state_suspend =
+		pinctrl_lookup_state(mdm->pinctrl,
+				"mdm_suspend");
+	if (IS_ERR_OR_NULL(mdm->gpio_state_suspend)) {
+		retval = PTR_ERR(mdm->gpio_state_suspend);
+		goto err_state_suspend;
+	}
+	retval = pinctrl_select_state(mdm->pinctrl, mdm->gpio_state_active);
+	return retval;
+
+err_state_suspend:
+	mdm->gpio_state_active = NULL;
+err_state_active:
+	mdm->gpio_state_suspend = NULL;
+	mdm->gpio_state_booting = NULL;
+	mdm->gpio_state_running = NULL;
+	return retval;
+}
+
+static int mdm9x25_setup_hw(struct mdm_ctrl *mdm,
+					const struct mdm_ops *ops,
+					struct platform_device *pdev)
+{
+	int ret;
+	struct esoc_clink *esoc;
+	const struct esoc_clink_ops *clink_ops = ops->clink_ops;
+	const struct mdm_pon_ops *pon_ops = ops->pon_ops;
+
+	mdm->dev = &pdev->dev;
+	mdm->pon_ops = pon_ops;
+	esoc = devm_kzalloc(mdm->dev, sizeof(*esoc), GFP_KERNEL);
+	if (!esoc) {
+		dev_err(mdm->dev, "cannot allocate esoc device\n");
+		return -ENOMEM;
+	}
+	mdm->mdm_queue = alloc_workqueue("mdm_queue", 0, 0);
+	if (!mdm->mdm_queue) {
+		dev_err(mdm->dev, "could not create mdm_queue\n");
+		return -ENOMEM;
+	}
+	mdm->irq_mask = 0;
+	mdm->ready = false;
+	ret = mdm_dt_parse_gpios(mdm);
+	if (ret)
+		return ret;
+	dev_err(mdm->dev, "parsing gpio done\n");
+	ret = mdm_pon_dt_init(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "pon dt init done\n");
+	ret = mdm_pinctrl_init(mdm);
+	if (ret)
+		return ret;
+	dev_err(mdm->dev, "pinctrl init done\n");
+	ret = mdm_pon_setup(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "pon setup done\n");
+	ret = mdm_configure_ipc(mdm, pdev);
+	if (ret)
+		return ret;
+	mdm_configure_debug(mdm);
+	dev_err(mdm->dev, "ipc configure done\n");
+	esoc->name = MDM9x25_LABEL;
+	esoc->link_name = MDM9x25_HSIC;
+	esoc->clink_ops = clink_ops;
+	esoc->parent = mdm->dev;
+	esoc->owner = THIS_MODULE;
+	esoc->np = pdev->dev.of_node;
+	set_esoc_clink_data(esoc, mdm);
+	ret = esoc_clink_register(esoc);
+	if (ret) {
+		dev_err(mdm->dev, "esoc registration failed\n");
+		return ret;
+	}
+	dev_dbg(mdm->dev, "esoc registration done\n");
+	init_completion(&mdm->debug_done);
+	INIT_WORK(&mdm->mdm_status_work, mdm_status_fn);
+	INIT_WORK(&mdm->restart_reason_work, mdm_get_restart_reason);
+	INIT_DELAYED_WORK(&mdm->mdm2ap_status_check_work, mdm2ap_status_check);
+	mdm->get_restart_reason = false;
+	mdm->debug_fail = false;
+	mdm->esoc = esoc;
+	mdm->init = 0;
+	return 0;
+}
+
+static int mdm9x35_setup_hw(struct mdm_ctrl *mdm,
+					const struct mdm_ops *ops,
+					struct platform_device *pdev)
+{
+	int ret;
+	struct device_node *node;
+	struct esoc_clink *esoc;
+	const struct esoc_clink_ops *clink_ops = ops->clink_ops;
+	const struct mdm_pon_ops *pon_ops = ops->pon_ops;
+
+	mdm->dev = &pdev->dev;
+	mdm->pon_ops = pon_ops;
+	node = pdev->dev.of_node;
+	esoc = devm_kzalloc(mdm->dev, sizeof(*esoc), GFP_KERNEL);
+	if (!esoc) {
+		dev_err(mdm->dev, "cannot allocate esoc device\n");
+		return -ENOMEM;
+	}
+	mdm->mdm_queue = alloc_workqueue("mdm_queue", 0, 0);
+	if (!mdm->mdm_queue) {
+		dev_err(mdm->dev, "could not create mdm_queue\n");
+		return -ENOMEM;
+	}
+	mdm->irq_mask = 0;
+	mdm->ready = false;
+	ret = mdm_dt_parse_gpios(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "parsing gpio done\n");
+	ret = mdm_pon_dt_init(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "pon dt init done\n");
+	ret = mdm_pinctrl_init(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "pinctrl init done\n");
+	ret = mdm_pon_setup(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "pon setup done\n");
+	ret = mdm_configure_ipc(mdm, pdev);
+	if (ret)
+		return ret;
+	mdm_configure_debug(mdm);
+	dev_dbg(mdm->dev, "ipc configure done\n");
+	esoc->name = MDM9x35_LABEL;
+	mdm->dual_interface = of_property_read_bool(node,
+						"qcom,mdm-dual-link");
+	/* Check if link gpio is available */
+	if (gpio_is_valid(MDM_GPIO(mdm, MDM_LINK_DETECT))) {
+		if (mdm->dual_interface) {
+			if (gpio_get_value(MDM_GPIO(mdm, MDM_LINK_DETECT)))
+				esoc->link_name = MDM9x35_DUAL_LINK;
+			else
+				esoc->link_name = MDM9x35_PCIE;
+		} else {
+			if (gpio_get_value(MDM_GPIO(mdm, MDM_LINK_DETECT)))
+				esoc->link_name = MDM9x35_HSIC;
+			else
+				esoc->link_name = MDM9x35_PCIE;
+		}
+	} else if (mdm->dual_interface) {
+		esoc->link_name = MDM9x35_DUAL_LINK;
+	} else {
+		esoc->link_name = MDM9x35_HSIC;
+	}
+	esoc->clink_ops = clink_ops;
+	esoc->parent = mdm->dev;
+	esoc->owner = THIS_MODULE;
+	esoc->np = pdev->dev.of_node;
+	set_esoc_clink_data(esoc, mdm);
+	ret = esoc_clink_register(esoc);
+	if (ret) {
+		dev_err(mdm->dev, "esoc registration failed\n");
+		return ret;
+	}
+	dev_dbg(mdm->dev, "esoc registration done\n");
+	init_completion(&mdm->debug_done);
+	INIT_WORK(&mdm->mdm_status_work, mdm_status_fn);
+	INIT_WORK(&mdm->restart_reason_work, mdm_get_restart_reason);
+	INIT_DELAYED_WORK(&mdm->mdm2ap_status_check_work, mdm2ap_status_check);
+	mdm->get_restart_reason = false;
+	mdm->debug_fail = false;
+	mdm->esoc = esoc;
+	mdm->init = 0;
+	return 0;
+}
+
+static int mdm9x55_setup_hw(struct mdm_ctrl *mdm,
+					const struct mdm_ops *ops,
+					struct platform_device *pdev)
+{
+	int ret;
+	struct device_node *node;
+	struct esoc_clink *esoc;
+	const struct esoc_clink_ops *clink_ops = ops->clink_ops;
+	const struct mdm_pon_ops *pon_ops = ops->pon_ops;
+
+	mdm->dev = &pdev->dev;
+	mdm->pon_ops = pon_ops;
+	node = pdev->dev.of_node;
+	esoc = devm_kzalloc(mdm->dev, sizeof(*esoc), GFP_KERNEL);
+	if (!esoc) {
+		dev_err(mdm->dev, "cannot allocate esoc device\n");
+		return -ENOMEM;
+	}
+	mdm->mdm_queue = alloc_workqueue("mdm_queue", 0, 0);
+	if (!mdm->mdm_queue) {
+		dev_err(mdm->dev, "could not create mdm_queue\n");
+		return -ENOMEM;
+	}
+	mdm->irq_mask = 0;
+	mdm->ready = false;
+	ret = mdm_dt_parse_gpios(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "parsing gpio done\n");
+	ret = mdm_pon_dt_init(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "pon dt init done\n");
+	ret = mdm_pinctrl_init(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "pinctrl init done\n");
+	ret = mdm_pon_setup(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "pon setup done\n");
+	ret = mdm_configure_ipc(mdm, pdev);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "ipc configure done\n");
+	esoc->name = MDM9x55_LABEL;
+	mdm->dual_interface = of_property_read_bool(node,
+						"qcom,mdm-dual-link");
+	esoc->link_name = MDM9x55_PCIE;
+	esoc->clink_ops = clink_ops;
+	esoc->parent = mdm->dev;
+	esoc->owner = THIS_MODULE;
+	esoc->np = pdev->dev.of_node;
+	set_esoc_clink_data(esoc, mdm);
+	ret = esoc_clink_register(esoc);
+	if (ret) {
+		dev_err(mdm->dev, "esoc registration failed\n");
+		return ret;
+	}
+	dev_dbg(mdm->dev, "esoc registration done\n");
+	init_completion(&mdm->debug_done);
+	INIT_WORK(&mdm->mdm_status_work, mdm_status_fn);
+	INIT_WORK(&mdm->restart_reason_work, mdm_get_restart_reason);
+	INIT_DELAYED_WORK(&mdm->mdm2ap_status_check_work, mdm2ap_status_check);
+	mdm->get_restart_reason = false;
+	mdm->debug_fail = false;
+	mdm->esoc = esoc;
+	mdm->init = 0;
+	return 0;
+}
+
+static struct esoc_clink_ops mdm_cops = {
+	.cmd_exe = mdm_cmd_exe,
+	.get_status = mdm_get_status,
+	.notify = mdm_notify,
+};
+
+static struct mdm_ops mdm9x25_ops = {
+	.clink_ops = &mdm_cops,
+	.config_hw = mdm9x25_setup_hw,
+	.pon_ops = &mdm9x25_pon_ops,
+};
+
+static struct mdm_ops mdm9x35_ops = {
+	.clink_ops = &mdm_cops,
+	.config_hw = mdm9x35_setup_hw,
+	.pon_ops = &mdm9x35_pon_ops,
+};
+
+static struct mdm_ops mdm9x55_ops = {
+	.clink_ops = &mdm_cops,
+	.config_hw = mdm9x55_setup_hw,
+	.pon_ops = &mdm9x55_pon_ops,
+};
+
+static const struct of_device_id mdm_dt_match[] = {
+	{ .compatible = "qcom,ext-mdm9x25",
+		.data = &mdm9x25_ops, },
+	{ .compatible = "qcom,ext-mdm9x35",
+		.data = &mdm9x35_ops, },
+	{ .compatible = "qcom,ext-mdm9x55",
+		.data = &mdm9x55_ops, },
+	{},
+};
+MODULE_DEVICE_TABLE(of, mdm_dt_match);
+
+static int mdm_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *match;
+	const struct mdm_ops *mdm_ops;
+	struct device_node *node = pdev->dev.of_node;
+	struct mdm_ctrl *mdm;
+
+	match = of_match_node(mdm_dt_match, node);
+	if (!match)
+		return -ENODEV;
+	mdm_ops = match->data;
+	mdm = devm_kzalloc(&pdev->dev, sizeof(*mdm), GFP_KERNEL);
+	if (!mdm)
+		return -ENOMEM;
+	return mdm_ops->config_hw(mdm, mdm_ops, pdev);
+}
+
+static struct platform_driver mdm_driver = {
+	.probe		= mdm_probe,
+	.driver = {
+		.name	= "ext-mdm",
+		.owner	= THIS_MODULE,
+		.of_match_table = of_match_ptr(mdm_dt_match),
+	},
+};
+
+static int __init mdm_register(void)
+{
+	return platform_driver_register(&mdm_driver);
+}
+module_init(mdm_register);
+
+static void __exit mdm_unregister(void)
+{
+	platform_driver_unregister(&mdm_driver);
+}
+module_exit(mdm_unregister);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/esoc/esoc-mdm-dbg-eng.c b/drivers/esoc/esoc-mdm-dbg-eng.c
new file mode 100644
index 0000000..a186ea8
--- /dev/null
+++ b/drivers/esoc/esoc-mdm-dbg-eng.c
@@ -0,0 +1,204 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include "esoc.h"
+
+/*
+ * esoc_mask: Specifies if a command/notifier is masked, and
+ * what trigger value must be reached for the mask to take effect.
+ * @mask_trigger: trigger value for the mask.
+ * @mask: boolean to determine if the command should be masked.
+ */
+struct esoc_mask {
+	atomic_t mask_trigger;
+	bool mask;
+};
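+/*
+ * Illustrative semantics of the mask trigger (see dbg_check_cmd_mask()
+ * below): with mask = true and mask_trigger = ATOMIC_INIT(1), the first
+ * check decrements the trigger to 0 and returns false (command allowed);
+ * the second decrements it to -1 and returns true (command masked). A
+ * trigger of 0 therefore masks the very next command.
+ */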
+
+/*
+ * manual_to_esoc_cmd: Converts a user provided command
+ * to a corresponding esoc command.
+ * @cmd: ESOC command
+ * @manual_cmd: user specified command string.
+ */
+struct manual_to_esoc_cmd {
+	unsigned int cmd;
+	char manual_cmd[20];
+};
+
+/*
+ * manual_to_esoc_notify: Converts a user provided notification
+ * to the corresponding esoc notification for the primary SOC.
+ * @notify: ESOC notification.
+ * @manual_notify: user specified notification string.
+ */
+struct manual_to_esoc_notify {
+	unsigned int notify;
+	char manual_notify[20];
+};
+
+static const struct manual_to_esoc_cmd cmd_map[] = {
+	{
+		.cmd = ESOC_PWR_ON,
+		.manual_cmd = "PON",
+	},
+	{
+		.cmd = ESOC_PREPARE_DEBUG,
+		.manual_cmd = "ENTER_DLOAD",
+	},
+	{	.cmd = ESOC_PWR_OFF,
+		.manual_cmd = "POFF",
+	},
+	{
+		.cmd = ESOC_FORCE_PWR_OFF,
+		.manual_cmd = "FORCE_POFF",
+	},
+};
+
+static struct esoc_mask cmd_mask[] = {
+	[ESOC_PWR_ON] = {
+		.mask = false,
+		.mask_trigger = ATOMIC_INIT(1),
+	},
+	[ESOC_PREPARE_DEBUG] = {
+		.mask = false,
+		.mask_trigger = ATOMIC_INIT(0),
+	},
+	[ESOC_PWR_OFF] = {
+		.mask = false,
+		.mask_trigger = ATOMIC_INIT(0),
+	},
+	[ESOC_FORCE_PWR_OFF] = {
+		.mask = false,
+		.mask_trigger = ATOMIC_INIT(0),
+	},
+};
+
+static const struct manual_to_esoc_notify notify_map[] = {
+	{
+		.notify = ESOC_PRIMARY_REBOOT,
+		.manual_notify = "REBOOT",
+	},
+	{
+		.notify = ESOC_PRIMARY_CRASH,
+		.manual_notify = "PANIC",
+	},
+};
+
+static struct esoc_mask notify_mask[] = {
+	[ESOC_PRIMARY_REBOOT] = {
+		.mask = false,
+		.mask_trigger = ATOMIC_INIT(0),
+	},
+	[ESOC_PRIMARY_CRASH] = {
+		.mask = false,
+		.mask_trigger = ATOMIC_INIT(0),
+	},
+};
+
+bool dbg_check_cmd_mask(unsigned int cmd)
+{
+	pr_debug("command to mask %d\n", cmd);
+	if (cmd_mask[cmd].mask)
+		return atomic_add_negative(-1, &cmd_mask[cmd].mask_trigger);
+	else
+		return false;
+}
+EXPORT_SYMBOL(dbg_check_cmd_mask);
+
+bool dbg_check_notify_mask(unsigned int notify)
+{
+	pr_debug("notifier to mask %d\n", notify);
+	if (notify_mask[notify].mask)
+		return atomic_add_negative(-1,
+					&notify_mask[notify].mask_trigger);
+	else
+		return false;
+}
+EXPORT_SYMBOL(dbg_check_notify_mask);
+
+/*
+ * Create driver attributes that let you mask
+ * specific commands.
+ */
+static ssize_t cmd_mask_store(struct device_driver *drv, const char *buf,
+							size_t count)
+{
+	unsigned int cmd, i;
+
+	pr_debug("user input command %s", buf);
+	for (i = 0; i < ARRAY_SIZE(cmd_map); i++) {
+		if (sysfs_streq(cmd_map[i].manual_cmd, buf)) {
+			/*
+			 * Map manual command string to ESOC command
+			 * set mask for ESOC command
+			 */
+			cmd = cmd_map[i].cmd;
+			cmd_mask[cmd].mask = true;
+			pr_debug("Setting mask for manual command %s\n",
+								buf);
+			break;
+		}
+	}
+	if (i >= ARRAY_SIZE(cmd_map))
+		pr_err("invalid command specified\n");
+	return count;
+}
+static DRIVER_ATTR(command_mask, 00200, NULL, cmd_mask_store);
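+/*
+ * Illustrative userspace usage; the exact sysfs path depends on the bus
+ * and driver names ("esoc" and "mdm-4x" in this series):
+ *
+ *	echo "ENTER_DLOAD" > /sys/bus/esoc/drivers/mdm-4x/command_mask
+ *
+ * masks the next ESOC_PREPARE_DEBUG command (sysfs_streq() tolerates
+ * the trailing newline that echo appends).
+ */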
+
+static ssize_t notifier_mask_store(struct device_driver *drv, const char *buf,
+							size_t count)
+{
+	unsigned int notify, i;
+
+	pr_debug("user input notifier %s", buf);
+	for (i = 0; i < ARRAY_SIZE(notify_map); i++) {
+		if (sysfs_streq(buf, notify_map[i].manual_notify)) {
+			/*
+			 * Map manual notifier string to primary soc
+			 * notifier. Also set mask for the notifier.
+			 */
+			notify = notify_map[i].notify;
+			notify_mask[notify].mask = true;
+			pr_debug("Setting mask for manual notification %s\n",
+									buf);
+			break;
+		}
+	}
+	if (i >= ARRAY_SIZE(notify_map))
+		pr_err("invalid notifier specified\n");
+	return count;
+}
+static DRIVER_ATTR(notifier_mask, 00200, NULL, notifier_mask_store);
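+/*
+ * As above, e.g. echo "PANIC" > /sys/bus/esoc/drivers/mdm-4x/notifier_mask
+ * (path illustrative) suppresses the next ESOC_PRIMARY_CRASH notification.
+ */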
+
+int mdm_dbg_eng_init(struct esoc_drv *esoc_drv)
+{
+	int ret;
+	struct device_driver *drv = &esoc_drv->driver;
+
+	ret = driver_create_file(drv, &driver_attr_command_mask);
+	if (ret) {
+		pr_err("Unable to create command mask file\n");
+		goto cmd_mask_err;
+	}
+	ret = driver_create_file(drv, &driver_attr_notifier_mask);
+	if (ret) {
+		pr_err("Unable to create notify mask file\n");
+		goto notify_mask_err;
+	}
+	return 0;
+notify_mask_err:
+	driver_remove_file(drv, &driver_attr_command_mask);
+cmd_mask_err:
+	return ret;
+}
+EXPORT_SYMBOL(mdm_dbg_eng_init);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/esoc/esoc-mdm-drv.c b/drivers/esoc/esoc-mdm-drv.c
new file mode 100644
index 0000000..473a9c7
--- /dev/null
+++ b/drivers/esoc/esoc-mdm-drv.c
@@ -0,0 +1,309 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/reboot.h>
+#include "esoc.h"
+#include "mdm-dbg.h"
+
+enum {
+	PWR_OFF = 0x1,
+	PWR_ON,
+	BOOT,
+	RUN,
+	CRASH,
+	IN_DEBUG,
+	SHUTDOWN,
+	RESET,
+	PEER_CRASH,
+};
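+/*
+ * Typical state flow, as implemented by the handlers below: PWR_OFF ->
+ * RUN on a successful power-up; on an error fatal or unexpected reset,
+ * RUN -> CRASH -> IN_DEBUG (ramdump collection) -> PWR_OFF -> RUN
+ * across a subsystem restart.
+ */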
+
+struct mdm_drv {
+	unsigned int mode;
+	struct esoc_eng cmd_eng;
+	struct completion boot_done;
+	struct completion req_eng_wait;
+	struct esoc_clink *esoc_clink;
+	bool boot_fail;
+	struct workqueue_struct *mdm_queue;
+	struct work_struct ssr_work;
+	struct notifier_block esoc_restart;
+};
+#define to_mdm_drv(d)	container_of(d, struct mdm_drv, cmd_eng)
+
+static int esoc_msm_restart_handler(struct notifier_block *nb,
+		unsigned long action, void *data)
+{
+	struct mdm_drv *mdm_drv = container_of(nb, struct mdm_drv,
+					esoc_restart);
+	struct esoc_clink *esoc_clink = mdm_drv->esoc_clink;
+	const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
+
+	if (action == SYS_RESTART) {
+		if (mdm_dbg_stall_notify(ESOC_PRIMARY_REBOOT))
+			return NOTIFY_OK;
+		dev_dbg(&esoc_clink->dev, "Notifying esoc of cold reboot\n");
+		clink_ops->notify(ESOC_PRIMARY_REBOOT, esoc_clink);
+	}
+	return NOTIFY_OK;
+}
+static void mdm_handle_clink_evt(enum esoc_evt evt,
+					struct esoc_eng *eng)
+{
+	struct mdm_drv *mdm_drv = to_mdm_drv(eng);
+
+	switch (evt) {
+	case ESOC_INVALID_STATE:
+		mdm_drv->boot_fail = true;
+		complete(&mdm_drv->boot_done);
+		break;
+	case ESOC_RUN_STATE:
+		mdm_drv->boot_fail = false;
+		mdm_drv->mode = RUN;
+		complete(&mdm_drv->boot_done);
+		break;
+	case ESOC_UNEXPECTED_RESET:
+	case ESOC_ERR_FATAL:
+		if (mdm_drv->mode == CRASH)
+			return;
+		mdm_drv->mode = CRASH;
+		queue_work(mdm_drv->mdm_queue, &mdm_drv->ssr_work);
+		break;
+	case ESOC_REQ_ENG_ON:
+		complete(&mdm_drv->req_eng_wait);
+		break;
+	default:
+		break;
+	}
+}
+
+static void mdm_ssr_fn(struct work_struct *work)
+{
+	struct mdm_drv *mdm_drv = container_of(work, struct mdm_drv, ssr_work);
+
+	/*
+	 * If restarting esoc fails, the SSR framework triggers a kernel panic
+	 */
+	esoc_clink_request_ssr(mdm_drv->esoc_clink);
+}
+
+static void mdm_crash_shutdown(const struct subsys_desc *mdm_subsys)
+{
+	struct esoc_clink *esoc_clink = container_of(mdm_subsys,
+						struct esoc_clink, subsys);
+	const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
+
+	if (mdm_dbg_stall_notify(ESOC_PRIMARY_CRASH))
+		return;
+
+	clink_ops->notify(ESOC_PRIMARY_CRASH, esoc_clink);
+}
+
+static int mdm_subsys_shutdown(const struct subsys_desc *crashed_subsys,
+							bool force_stop)
+{
+	int ret;
+	struct esoc_clink *esoc_clink = container_of(crashed_subsys,
+						struct esoc_clink, subsys);
+	struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink);
+	const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
+
+	if (mdm_drv->mode == CRASH || mdm_drv->mode == PEER_CRASH) {
+		if (mdm_dbg_stall_cmd(ESOC_PREPARE_DEBUG))
+			/*
+			 * The debug command is masked; return success
+			 * to move to the next stage.
+			 */
+			return 0;
+		ret = clink_ops->cmd_exe(ESOC_PREPARE_DEBUG,
+							esoc_clink);
+		if (ret) {
+			dev_err(&esoc_clink->dev, "failed to enter debug\n");
+			return ret;
+		}
+		mdm_drv->mode = IN_DEBUG;
+	} else if (!force_stop) {
+		if (esoc_clink->subsys.sysmon_shutdown_ret)
+			ret = clink_ops->cmd_exe(ESOC_FORCE_PWR_OFF,
+							esoc_clink);
+		else {
+			if (mdm_dbg_stall_cmd(ESOC_PWR_OFF))
+				/*
+				 * The power off command is masked, so
+				 * return success and leave the state of
+				 * the command engine as is.
+				 */
+				return 0;
+			ret = clink_ops->cmd_exe(ESOC_PWR_OFF, esoc_clink);
+		}
+		if (ret) {
+			dev_err(&esoc_clink->dev, "failed to exe power off\n");
+			return ret;
+		}
+		mdm_drv->mode = PWR_OFF;
+	}
+	return 0;
+}
+
+static int mdm_subsys_powerup(const struct subsys_desc *crashed_subsys)
+{
+	int ret;
+	struct esoc_clink *esoc_clink =
+				container_of(crashed_subsys, struct esoc_clink,
+								subsys);
+	struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink);
+	const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
+
+	if (!esoc_req_eng_enabled(esoc_clink)) {
+		dev_dbg(&esoc_clink->dev, "Wait for req eng registration\n");
+		wait_for_completion(&mdm_drv->req_eng_wait);
+	}
+	if (mdm_drv->mode == PWR_OFF) {
+		if (mdm_dbg_stall_cmd(ESOC_PWR_ON))
+			return -EBUSY;
+		ret = clink_ops->cmd_exe(ESOC_PWR_ON, esoc_clink);
+		if (ret) {
+			dev_err(&esoc_clink->dev, "pwr on fail\n");
+			return ret;
+		}
+	} else if (mdm_drv->mode == IN_DEBUG) {
+		ret = clink_ops->cmd_exe(ESOC_EXIT_DEBUG, esoc_clink);
+		if (ret) {
+			dev_err(&esoc_clink->dev, "cannot exit debug mode\n");
+			return ret;
+		}
+		mdm_drv->mode = PWR_OFF;
+		ret = clink_ops->cmd_exe(ESOC_PWR_ON, esoc_clink);
+		if (ret) {
+			dev_err(&esoc_clink->dev, "pwr on fail\n");
+			return ret;
+		}
+	}
+	wait_for_completion(&mdm_drv->boot_done);
+	if (mdm_drv->boot_fail) {
+		dev_err(&esoc_clink->dev, "booting failed\n");
+		return -EIO;
+	}
+	return 0;
+}
+
+static int mdm_subsys_ramdumps(int want_dumps,
+				const struct subsys_desc *crashed_subsys)
+{
+	int ret;
+	struct esoc_clink *esoc_clink =
+				container_of(crashed_subsys, struct esoc_clink,
+								subsys);
+	const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
+
+	if (want_dumps) {
+		ret = clink_ops->cmd_exe(ESOC_EXE_DEBUG, esoc_clink);
+		if (ret) {
+			dev_err(&esoc_clink->dev, "debugging failed\n");
+			return ret;
+		}
+	}
+	return 0;
+}
+
+static int mdm_register_ssr(struct esoc_clink *esoc_clink)
+{
+	esoc_clink->subsys.shutdown = mdm_subsys_shutdown;
+	esoc_clink->subsys.ramdump = mdm_subsys_ramdumps;
+	esoc_clink->subsys.powerup = mdm_subsys_powerup;
+	esoc_clink->subsys.crash_shutdown = mdm_crash_shutdown;
+	return esoc_clink_register_ssr(esoc_clink);
+}
+
+int esoc_ssr_probe(struct esoc_clink *esoc_clink, struct esoc_drv *drv)
+{
+	int ret;
+	struct mdm_drv *mdm_drv;
+	struct esoc_eng *esoc_eng;
+
+	mdm_drv = devm_kzalloc(&esoc_clink->dev, sizeof(*mdm_drv), GFP_KERNEL);
+	if (!mdm_drv)
+		return -ENOMEM;
+	esoc_eng = &mdm_drv->cmd_eng;
+	esoc_eng->handle_clink_evt = mdm_handle_clink_evt;
+	ret = esoc_clink_register_cmd_eng(esoc_clink, esoc_eng);
+	if (ret) {
+		dev_err(&esoc_clink->dev, "failed to register cmd engine\n");
+		return ret;
+	}
+	ret = mdm_register_ssr(esoc_clink);
+	if (ret)
+		goto ssr_err;
+	mdm_drv->mdm_queue = alloc_workqueue("mdm_drv_queue", 0, 0);
+	if (!mdm_drv->mdm_queue) {
+		dev_err(&esoc_clink->dev, "could not create mdm_queue\n");
+		ret = -ENOMEM;
+		goto queue_err;
+	}
+	esoc_set_drv_data(esoc_clink, mdm_drv);
+	init_completion(&mdm_drv->boot_done);
+	init_completion(&mdm_drv->req_eng_wait);
+	INIT_WORK(&mdm_drv->ssr_work, mdm_ssr_fn);
+	mdm_drv->esoc_clink = esoc_clink;
+	mdm_drv->mode = PWR_OFF;
+	mdm_drv->boot_fail = false;
+	mdm_drv->esoc_restart.notifier_call = esoc_msm_restart_handler;
+	ret = register_reboot_notifier(&mdm_drv->esoc_restart);
+	if (ret)
+		dev_err(&esoc_clink->dev, "register for reboot failed\n");
+	ret = mdm_dbg_eng_init(drv);
+	if (ret) {
+		debug_init_done = false;
+		dev_err(&esoc_clink->dev, "dbg engine failure\n");
+	} else {
+		dev_dbg(&esoc_clink->dev, "dbg engine initialized\n");
+		debug_init_done = true;
+	}
+	return 0;
+queue_err:
+	esoc_clink_unregister_ssr(esoc_clink);
+ssr_err:
+	esoc_clink_unregister_cmd_eng(esoc_clink, esoc_eng);
+	return ret;
+}
+
+static struct esoc_compat compat_table[] = {
+	{	.name = "MDM9x25",
+		.data = NULL,
+	},
+	{
+		.name = "MDM9x35",
+		.data = NULL,
+	},
+	{
+		.name = "MDM9x55",
+		.data = NULL,
+	},
+};
+
+static struct esoc_drv esoc_ssr_drv = {
+	.owner = THIS_MODULE,
+	.probe = esoc_ssr_probe,
+	.compat_table = compat_table,
+	.compat_entries = ARRAY_SIZE(compat_table),
+	.driver = {
+		.name = "mdm-4x",
+	},
+};
+
+static int __init esoc_ssr_init(void)
+{
+	return esoc_drv_register(&esoc_ssr_drv);
+}
+module_init(esoc_ssr_init);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/esoc/esoc-mdm-pon.c b/drivers/esoc/esoc-mdm-pon.c
new file mode 100644
index 0000000..47d54db
--- /dev/null
+++ b/drivers/esoc/esoc-mdm-pon.c
@@ -0,0 +1,220 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "esoc-mdm.h"
+
+/* This function can be called from atomic context. */
+static int mdm4x_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
+{
+	int soft_reset_direction_assert = 0,
+	    soft_reset_direction_de_assert = 1;
+
+	if (mdm->soft_reset_inverted) {
+		soft_reset_direction_assert = 1;
+		soft_reset_direction_de_assert = 0;
+	}
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+			soft_reset_direction_assert);
+	/*
+	 * Allow PS hold assert to be detected
+	 */
+	if (!atomic)
+		usleep_range(8000, 9000);
+	else
+		mdelay(6);
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+			soft_reset_direction_de_assert);
+	return 0;
+}
+
+/* This function can be called from atomic context. */
+static int mdm9x55_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
+{
+	int soft_reset_direction_assert = 0,
+	    soft_reset_direction_de_assert = 1;
+
+	if (mdm->soft_reset_inverted) {
+		soft_reset_direction_assert = 1;
+		soft_reset_direction_de_assert = 0;
+	}
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+			soft_reset_direction_assert);
+	/*
+	 * Allow PS hold assert to be detected
+	 */
+	if (!atomic)
+		usleep_range(203000, 300000);
+	else
+		mdelay(203);
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+			soft_reset_direction_de_assert);
+	return 0;
+}
+
+static int mdm4x_do_first_power_on(struct mdm_ctrl *mdm)
+{
+	int i;
+	int pblrdy;
+	struct device *dev = mdm->dev;
+
+	dev_dbg(dev, "Powering on modem for the first time\n");
+	mdm_toggle_soft_reset(mdm, false);
+	/* Add a delay to allow the PON sequence to complete */
+	mdelay(50);
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_STATUS), 1);
+	if (gpio_is_valid(MDM_GPIO(mdm, MDM2AP_PBLRDY))) {
+		for (i = 0; i  < MDM_PBLRDY_CNT; i++) {
+			pblrdy = gpio_get_value(MDM_GPIO(mdm, MDM2AP_PBLRDY));
+			if (pblrdy)
+				break;
+			usleep_range(5000, 6000);
+		}
+		dev_dbg(dev, "pblrdy i:%d\n", i);
+		mdelay(200);
+	} else {
+		/*
+		 * No PBLRDY gpio is associated with this modem.
+		 * Send a request for the image and let userspace confirm
+		 * establishment of the link to the external modem.
+		 */
+		esoc_clink_queue_request(ESOC_REQ_IMG, mdm->esoc);
+	}
+	return 0;
+}
+
+static int mdm4x_power_down(struct mdm_ctrl *mdm)
+{
+	struct device *dev = mdm->dev;
+	int soft_reset_direction = mdm->soft_reset_inverted ? 1 : 0;
+	/* Assert the soft reset line whether mdm2ap_status went low or not */
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+					soft_reset_direction);
+	dev_dbg(dev, "Doing a hard reset\n");
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+						soft_reset_direction);
+	/*
+	 * Currently, there is a debounce timer on the charm PMIC. It is
+	 * necessary to hold the PMIC RESET low for 400ms
+	 * for the reset to fully take place. Sleep here to ensure the
+	 * reset has occurred before the function exits.
+	 */
+	mdelay(400);
+	return 0;
+}
+
+static int mdm9x55_power_down(struct mdm_ctrl *mdm)
+{
+	struct device *dev = mdm->dev;
+	int soft_reset_direction = mdm->soft_reset_inverted ? 1 : 0;
+	/* Assert the soft reset line whether mdm2ap_status went low or not */
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+					soft_reset_direction);
+	dev_dbg(dev, "Doing a hard reset\n");
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+						soft_reset_direction);
+	/*
+	 * Currently, there is a debounce timer on the charm PMIC. It is
+	 * necessary to hold the PMIC RESET low for 406ms
+	 * for the reset to fully take place. Sleep here to ensure the
+	 * reset has occurred before the function exits.
+	 */
+	mdelay(406);
+	return 0;
+}
+
+static void mdm4x_cold_reset(struct mdm_ctrl *mdm)
+{
+	dev_dbg(mdm->dev, "Triggering mdm cold reset");
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+			!!mdm->soft_reset_inverted);
+	mdelay(300);
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+			!mdm->soft_reset_inverted);
+}
+
+static void mdm9x55_cold_reset(struct mdm_ctrl *mdm)
+{
+	dev_dbg(mdm->dev, "Triggering mdm cold reset");
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+			!!mdm->soft_reset_inverted);
+	mdelay(334);
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+			!mdm->soft_reset_inverted);
+}
+
+static int mdm4x_pon_dt_init(struct mdm_ctrl *mdm)
+{
+	int val;
+	struct device_node *node = mdm->dev->of_node;
+	enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
+
+	val = of_get_named_gpio_flags(node, "qcom,ap2mdm-soft-reset-gpio",
+						0, &flags);
+	if (val < 0)
+		return -EIO;
+	MDM_GPIO(mdm, AP2MDM_SOFT_RESET) = val;
+	if (flags & OF_GPIO_ACTIVE_LOW)
+		mdm->soft_reset_inverted = 1;
+	return 0;
+}
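+/*
+ * Illustrative devicetree fragment consumed above (controller phandle
+ * and pin number are hypothetical); a flags value of 0x1 marks the line
+ * active low, which sets soft_reset_inverted:
+ *
+ *	qcom,ap2mdm-soft-reset-gpio = <&tlmm 24 0x1>;
+ */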
+
+static int mdm4x_pon_setup(struct mdm_ctrl *mdm)
+{
+	struct device *dev = mdm->dev;
+
+	if (gpio_is_valid(MDM_GPIO(mdm, AP2MDM_SOFT_RESET))) {
+		if (gpio_request(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+					 "AP2MDM_SOFT_RESET")) {
+			dev_err(dev, "Cannot config AP2MDM_SOFT_RESET gpio\n");
+			return -EIO;
+		}
+	}
+	return 0;
+}
+
+struct mdm_pon_ops mdm9x25_pon_ops = {
+	.pon = mdm4x_do_first_power_on,
+	.soft_reset = mdm4x_toggle_soft_reset,
+	.poff_force = mdm4x_power_down,
+	.cold_reset = mdm4x_cold_reset,
+	.dt_init = mdm4x_pon_dt_init,
+	.setup = mdm4x_pon_setup,
+};
+
+struct mdm_pon_ops mdm9x35_pon_ops = {
+	.pon = mdm4x_do_first_power_on,
+	.soft_reset = mdm4x_toggle_soft_reset,
+	.poff_force = mdm4x_power_down,
+	.cold_reset = mdm4x_cold_reset,
+	.dt_init = mdm4x_pon_dt_init,
+	.setup = mdm4x_pon_setup,
+};
+
+struct mdm_pon_ops mdm9x45_pon_ops = {
+	.pon = mdm4x_do_first_power_on,
+	.soft_reset = mdm4x_toggle_soft_reset,
+	.poff_force = mdm4x_power_down,
+	.cold_reset = mdm4x_cold_reset,
+	.dt_init = mdm4x_pon_dt_init,
+	.setup = mdm4x_pon_setup,
+};
+
+struct mdm_pon_ops mdm9x55_pon_ops = {
+	.pon = mdm4x_do_first_power_on,
+	.soft_reset = mdm9x55_toggle_soft_reset,
+	.poff_force = mdm9x55_power_down,
+	.cold_reset = mdm9x55_cold_reset,
+	.dt_init = mdm4x_pon_dt_init,
+	.setup = mdm4x_pon_setup,
+};
diff --git a/drivers/esoc/esoc-mdm.h b/drivers/esoc/esoc-mdm.h
new file mode 100644
index 0000000..fa3a576
--- /dev/null
+++ b/drivers/esoc/esoc-mdm.h
@@ -0,0 +1,156 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ESOC_MDM_H__
+#define __ESOC_MDM_H__
+
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include "esoc.h"
+
+#define MDM_PBLRDY_CNT			20
+#define INVALID_GPIO			(-1)
+#define MDM_GPIO(mdm, i)		((mdm)->gpios[i])
+#define MDM9x25_LABEL			"MDM9x25"
+#define MDM9x25_HSIC			"HSIC"
+#define MDM9x35_LABEL			"MDM9x35"
+#define MDM9x35_PCIE			"PCIe"
+#define MDM9x35_DUAL_LINK		"HSIC+PCIe"
+#define MDM9x35_HSIC			"HSIC"
+#define MDM9x45_LABEL			"MDM9x45"
+#define MDM9x45_PCIE			"PCIe"
+#define MDM9x55_LABEL			"MDM9x55"
+#define MDM9x55_PCIE			"PCIe"
+#define MDM2AP_STATUS_TIMEOUT_MS	120000L
+#define MDM_MODEM_TIMEOUT		3000
+#define DEF_RAMDUMP_TIMEOUT		120000
+#define DEF_RAMDUMP_DELAY		2000
+#define RD_BUF_SIZE			100
+#define SFR_MAX_RETRIES			10
+#define SFR_RETRY_INTERVAL		1000
+#define MDM_DBG_OFFSET			0x934
+#define MDM_DBG_MODE			0x53444247
+#define MDM_CTI_NAME			"coresight-cti-rpm-cpu0"
+#define MDM_CTI_TRIG			0
+#define MDM_CTI_CH			0
+
+enum mdm_gpio {
+	AP2MDM_WAKEUP = 0,
+	AP2MDM_STATUS,
+	AP2MDM_SOFT_RESET,
+	AP2MDM_VDD_MIN,
+	AP2MDM_CHNLRDY,
+	AP2MDM_ERRFATAL,
+	AP2MDM_VDDMIN,
+	AP2MDM_PMIC_PWR_EN,
+	MDM2AP_WAKEUP,
+	MDM2AP_ERRFATAL,
+	MDM2AP_PBLRDY,
+	MDM2AP_STATUS,
+	MDM2AP_VDDMIN,
+	MDM_LINK_DETECT,
+	NUM_GPIOS,
+};
+
+struct mdm_pon_ops;
+
+struct mdm_ctrl {
+	int gpios[NUM_GPIOS];
+	spinlock_t status_lock;
+	struct workqueue_struct *mdm_queue;
+	struct delayed_work mdm2ap_status_check_work;
+	struct work_struct mdm_status_work;
+	struct work_struct restart_reason_work;
+	struct completion debug_done;
+	struct device *dev;
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *gpio_state_booting;
+	struct pinctrl_state *gpio_state_running;
+	struct pinctrl_state *gpio_state_active;
+	struct pinctrl_state *gpio_state_suspend;
+	int mdm2ap_status_valid_old_config;
+	int soft_reset_inverted;
+	int errfatal_irq;
+	int status_irq;
+	int pblrdy_irq;
+	int debug;
+	int init;
+	bool debug_fail;
+	unsigned int dump_timeout_ms;
+	unsigned int ramdump_delay_ms;
+	struct esoc_clink *esoc;
+	bool get_restart_reason;
+	unsigned long irq_mask;
+	bool ready;
+	bool dual_interface;
+	u32 status;
+	void __iomem *dbg_addr;
+	bool dbg_mode;
+	struct coresight_cti *cti;
+	int trig_cnt;
+	const struct mdm_pon_ops *pon_ops;
+};
+
+struct mdm_pon_ops {
+	int (*pon)(struct mdm_ctrl *mdm);
+	int (*soft_reset)(struct mdm_ctrl *mdm, bool atomic);
+	int (*poff_force)(struct mdm_ctrl *mdm);
+	int (*poff_cleanup)(struct mdm_ctrl *mdm);
+	void (*cold_reset)(struct mdm_ctrl *mdm);
+	int (*dt_init)(struct mdm_ctrl *mdm);
+	int (*setup)(struct mdm_ctrl *mdm);
+};
+
+struct mdm_ops {
+	struct esoc_clink_ops *clink_ops;
+	struct mdm_pon_ops *pon_ops;
+	int (*config_hw)(struct mdm_ctrl *mdm, const struct mdm_ops *ops,
+					struct platform_device *pdev);
+};
+
+static inline int mdm_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
+{
+	return mdm->pon_ops->soft_reset(mdm, atomic);
+}
+static inline int mdm_do_first_power_on(struct mdm_ctrl *mdm)
+{
+	return mdm->pon_ops->pon(mdm);
+}
+static inline int mdm_power_down(struct mdm_ctrl *mdm)
+{
+	return mdm->pon_ops->poff_force(mdm);
+}
+static inline void mdm_cold_reset(struct mdm_ctrl *mdm)
+{
+	mdm->pon_ops->cold_reset(mdm);
+}
+static inline int mdm_pon_dt_init(struct mdm_ctrl *mdm)
+{
+	return mdm->pon_ops->dt_init(mdm);
+}
+static inline int mdm_pon_setup(struct mdm_ctrl *mdm)
+{
+	return mdm->pon_ops->setup(mdm);
+}
+
+extern struct mdm_pon_ops mdm9x25_pon_ops;
+extern struct mdm_pon_ops mdm9x35_pon_ops;
+extern struct mdm_pon_ops mdm9x45_pon_ops;
+extern struct mdm_pon_ops mdm9x55_pon_ops;
+#endif
diff --git a/drivers/esoc/esoc.h b/drivers/esoc/esoc.h
new file mode 100644
index 0000000..0cec985
--- /dev/null
+++ b/drivers/esoc/esoc.h
@@ -0,0 +1,165 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __ESOC_H__
+#define __ESOC_H__
+
+#include <linux/cdev.h>
+#include <linux/completion.h>
+#include <linux/esoc_ctrl.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/subsystem_notif.h>
+
+#define ESOC_DEV_MAX		4
+#define ESOC_NAME_LEN		20
+#define ESOC_LINK_LEN		20
+
+struct esoc_clink;
+/**
+ * struct esoc_eng: Engine of the esoc control link
+ * @handle_clink_req: handle incoming esoc requests.
+ * @handle_clink_evt: handle incoming esoc events.
+ * @esoc_clink: pointer to esoc control link.
+ */
+struct esoc_eng {
+	void (*handle_clink_req)(enum esoc_req req,
+						struct esoc_eng *eng);
+	void (*handle_clink_evt)(enum esoc_evt evt,
+						struct esoc_eng *eng);
+	struct esoc_clink *esoc_clink;
+};
+
+/**
+ * struct esoc_clink: Representation of external esoc device
+ * @name: Name of the external esoc.
+ * @link_name: name of the physical link.
+ * @parent: parent device.
+ * @dev: device for userspace interface.
+ * @id: id of the external device.
+ * @owner: owner of the device.
+ * @clink_ops: control operations for the control link
+ * @req_eng: handle for request engine.
+ * @cmd_eng: handle for command engine.
+ * @clink_data: private data of esoc control link.
+ * @compat_data: compat data of esoc driver.
+ * @subsys_desc: descriptor for subsystem restart
+ * @subsys_dev: ssr device handle.
+ * @np: device tree node for esoc_clink.
+ */
+struct esoc_clink {
+	const char *name;
+	const char *link_name;
+	struct device *parent;
+	struct device dev;
+	unsigned int id;
+	struct module *owner;
+	const struct esoc_clink_ops *clink_ops;
+	struct esoc_eng *req_eng;
+	struct esoc_eng *cmd_eng;
+	spinlock_t notify_lock;
+	void *clink_data;
+	void *compat_data;
+	struct subsys_desc subsys;
+	struct subsys_device *subsys_dev;
+	struct device_node *np;
+};
+
+/**
+ * struct esoc_clink_ops: Operations to control external soc
+ * @cmd_exe: Execute control command
+ * @get_status: Get current status, or response to previous command
+ * @notify_esoc: notify external soc of events
+ */
+struct esoc_clink_ops {
+	int (*cmd_exe)(enum esoc_cmd cmd, struct esoc_clink *dev);
+	int (*get_status)(u32 *status, struct esoc_clink *dev);
+	void (*notify)(enum esoc_notify notify, struct esoc_clink *dev);
+};
+
+/**
+ * struct esoc_compat: Compatibility of esoc drivers.
+ * @name: esoc link that driver is compatible with.
+ * @data: driver data associated with esoc clink.
+ */
+struct esoc_compat {
+	const char *name;
+	void *data;
+};
+
+/**
+ * struct esoc_drv: Driver for an esoc clink
+ * @driver: drivers for esoc.
+ * @owner: module owner of esoc driver.
+ * @compat_table: compatible table for driver.
+ * @compat_entries: number of entries in the compat table.
+ * @probe: probe function for esoc driver.
+ */
+struct esoc_drv {
+	struct device_driver driver;
+	struct module *owner;
+	struct esoc_compat *compat_table;
+	unsigned int compat_entries;
+	int (*probe)(struct esoc_clink *esoc_clink,
+				struct esoc_drv *drv);
+};
+
+#define to_esoc_clink(d) container_of(d, struct esoc_clink, dev)
+#define to_esoc_drv(d) container_of(d, struct esoc_drv, driver)
+
+extern struct bus_type esoc_bus_type;
+
+/* Exported APIs */
+void esoc_dev_exit(void);
+int esoc_dev_init(void);
+void esoc_clink_unregister(struct esoc_clink *esoc_dev);
+int esoc_clink_register(struct esoc_clink *esoc_dev);
+struct esoc_clink *get_esoc_clink(int id);
+struct esoc_clink *get_esoc_clink_by_node(struct device_node *node);
+void put_esoc_clink(struct esoc_clink *esoc_clink);
+void *get_esoc_clink_data(struct esoc_clink *esoc);
+void set_esoc_clink_data(struct esoc_clink *esoc, void *data);
+void esoc_clink_evt_notify(enum esoc_evt, struct esoc_clink *esoc_dev);
+void esoc_clink_queue_request(enum esoc_req req, struct esoc_clink *esoc_dev);
+void esoc_for_each_dev(void *data, int (*fn)(struct device *dev,
+								void *data));
+int esoc_clink_register_cmd_eng(struct esoc_clink *esoc_clink,
+						struct esoc_eng *eng);
+void esoc_clink_unregister_cmd_eng(struct esoc_clink *esoc_clink,
+						struct esoc_eng *eng);
+int esoc_clink_register_req_eng(struct esoc_clink *esoc_clink,
+						struct esoc_eng *eng);
+void esoc_clink_unregister_req_eng(struct esoc_clink *esoc_clink,
+						struct esoc_eng *eng);
+int esoc_drv_register(struct esoc_drv *driver);
+void esoc_set_drv_data(struct esoc_clink *esoc_clink, void *data);
+void *esoc_get_drv_data(struct esoc_clink *esoc_clink);
+/* ssr operations */
+int esoc_clink_register_ssr(struct esoc_clink *esoc_clink);
+int esoc_clink_request_ssr(struct esoc_clink *esoc_clink);
+void esoc_clink_unregister_ssr(struct esoc_clink *esoc_clink);
+/* client notification */
+#ifdef CONFIG_ESOC_CLIENT
+void notify_esoc_clients(struct esoc_clink *esoc_clink, unsigned long evt);
+#else
+static inline void notify_esoc_clients(struct esoc_clink *esoc_clink,
+							unsigned long evt)
+{
+}
+#endif
+bool esoc_req_eng_enabled(struct esoc_clink *esoc_clink);
+bool esoc_cmd_eng_enabled(struct esoc_clink *esoc_clink);
+#endif
diff --git a/drivers/esoc/esoc_bus.c b/drivers/esoc/esoc_bus.c
new file mode 100644
index 0000000..4807e2b
--- /dev/null
+++ b/drivers/esoc/esoc_bus.c
@@ -0,0 +1,386 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include "esoc.h"
+
+static DEFINE_IDA(esoc_ida);
+
+/* SYSFS */
+static ssize_t
+esoc_name_show(struct device *dev, struct device_attribute *attr,
+							char *buf)
+{
+	return snprintf(buf, ESOC_NAME_LEN, "%s", to_esoc_clink(dev)->name);
+}
+
+static ssize_t
+esoc_link_show(struct device *dev, struct device_attribute *attr,
+							char *buf)
+{
+	return snprintf(buf, ESOC_LINK_LEN, "%s",
+				to_esoc_clink(dev)->link_name);
+}
+
+static struct device_attribute esoc_clink_attrs[] = {
+	__ATTR_RO(esoc_name),
+	__ATTR_RO(esoc_link),
+	__ATTR_NULL,
+};
+
+static int esoc_bus_match(struct device *dev, struct device_driver *drv)
+{
+	int i = 0, match = 1;
+	struct esoc_clink *esoc_clink = to_esoc_clink(dev);
+	struct esoc_drv *esoc_drv = to_esoc_drv(drv);
+	int entries = esoc_drv->compat_entries;
+	struct esoc_compat *table = esoc_drv->compat_table;
+
+	for (i = 0; i < entries; i++) {
+		if (strcasecmp(esoc_clink->name, table[i].name) == 0)
+			return match;
+	}
+	return 0;
+}
+
+static int esoc_bus_probe(struct device *dev)
+{
+	int ret;
+	struct esoc_clink *esoc_clink = to_esoc_clink(dev);
+	struct esoc_drv *esoc_drv = to_esoc_drv(dev->driver);
+
+	ret = esoc_drv->probe(esoc_clink, esoc_drv);
+	if (ret) {
+		pr_err("failed to probe %s dev\n", esoc_clink->name);
+		return ret;
+	}
+	return 0;
+}
+
+struct bus_type esoc_bus_type = {
+	.name = "esoc",
+	.match = esoc_bus_match,
+	.dev_attrs = esoc_clink_attrs,
+};
+EXPORT_SYMBOL(esoc_bus_type);
+
+struct device esoc_bus = {
+	.init_name = "esoc-bus"
+};
+EXPORT_SYMBOL(esoc_bus);
+
+/* bus accessor */
+static void esoc_clink_release(struct device *dev)
+{
+	struct esoc_clink *esoc_clink = to_esoc_clink(dev);
+
+	ida_simple_remove(&esoc_ida, esoc_clink->id);
+	kfree(esoc_clink);
+}
+
+static int esoc_clink_match_id(struct device *dev, void *id)
+{
+	struct esoc_clink *esoc_clink = to_esoc_clink(dev);
+	int *esoc_id = (int *)id;
+
+	if (esoc_clink->id == *esoc_id) {
+		if (!try_module_get(esoc_clink->owner))
+			return 0;
+		return 1;
+	}
+	return 0;
+}
+
+static int esoc_clink_match_node(struct device *dev, void *id)
+{
+	struct esoc_clink *esoc_clink = to_esoc_clink(dev);
+	struct device_node *node = id;
+
+	if (esoc_clink->np == node) {
+		if (!try_module_get(esoc_clink->owner))
+			return 0;
+		return 1;
+	}
+	return 0;
+}
+
+void esoc_for_each_dev(void *data, int (*fn)(struct device *dev, void *))
+{
+	bus_for_each_dev(&esoc_bus_type, NULL, data, fn);
+}
+EXPORT_SYMBOL(esoc_for_each_dev);
+
+struct esoc_clink *get_esoc_clink(int id)
+{
+	struct esoc_clink *esoc_clink;
+	struct device *dev;
+
+	dev = bus_find_device(&esoc_bus_type, NULL, &id, esoc_clink_match_id);
+	if (!dev)
+		return NULL;
+	esoc_clink = to_esoc_clink(dev);
+	return esoc_clink;
+}
+EXPORT_SYMBOL(get_esoc_clink);
+
+struct esoc_clink *get_esoc_clink_by_node(struct device_node *node)
+{
+	struct esoc_clink *esoc_clink;
+	struct device *dev;
+
+	dev = bus_find_device(&esoc_bus_type, NULL, node,
+						esoc_clink_match_node);
+	if (!dev)
+		return NULL;
+	esoc_clink = to_esoc_clink(dev);
+	return esoc_clink;
+}
+
+void put_esoc_clink(struct esoc_clink *esoc_clink)
+{
+	module_put(esoc_clink->owner);
+}
+EXPORT_SYMBOL(put_esoc_clink);
+
+bool esoc_req_eng_enabled(struct esoc_clink *esoc_clink)
+{
+	return esoc_clink->req_eng != NULL;
+}
+EXPORT_SYMBOL(esoc_req_eng_enabled);
+
+bool esoc_cmd_eng_enabled(struct esoc_clink *esoc_clink)
+{
+	return esoc_clink->cmd_eng != NULL;
+}
+EXPORT_SYMBOL(esoc_cmd_eng_enabled);
+
+/* ssr operations */
+int esoc_clink_register_ssr(struct esoc_clink *esoc_clink)
+{
+	int ret;
+	int len;
+	char *subsys_name;
+
+	len = strlen("esoc") + sizeof(esoc_clink->id);
+	subsys_name = kzalloc(len, GFP_KERNEL);
+	if (!subsys_name)
+		return -ENOMEM;
+	snprintf(subsys_name, len, "esoc%d", esoc_clink->id);
+	esoc_clink->subsys.name = subsys_name;
+	esoc_clink->dev.of_node = esoc_clink->np;
+	esoc_clink->subsys.dev = &esoc_clink->dev;
+	esoc_clink->subsys_dev = subsys_register(&esoc_clink->subsys);
+	if (IS_ERR(esoc_clink->subsys_dev)) {
+		dev_err(&esoc_clink->dev, "failed to register ssr node\n");
+		ret = PTR_ERR(esoc_clink->subsys_dev);
+		goto subsys_err;
+	}
+	return 0;
+subsys_err:
+	kfree(subsys_name);
+	return ret;
+}
+EXPORT_SYMBOL(esoc_clink_register_ssr);
+
+void esoc_clink_unregister_ssr(struct esoc_clink *esoc_clink)
+{
+	subsys_unregister(esoc_clink->subsys_dev);
+	kfree(esoc_clink->subsys.name);
+}
+EXPORT_SYMBOL(esoc_clink_unregister_ssr);
+
+int esoc_clink_request_ssr(struct esoc_clink *esoc_clink)
+{
+	subsystem_restart_dev(esoc_clink->subsys_dev);
+	return 0;
+}
+EXPORT_SYMBOL(esoc_clink_request_ssr);
+
+/* bus operations */
+void esoc_clink_evt_notify(enum esoc_evt evt, struct esoc_clink *esoc_clink)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&esoc_clink->notify_lock, flags);
+	notify_esoc_clients(esoc_clink, evt);
+	if (esoc_clink->req_eng && esoc_clink->req_eng->handle_clink_evt)
+		esoc_clink->req_eng->handle_clink_evt(evt, esoc_clink->req_eng);
+	if (esoc_clink->cmd_eng && esoc_clink->cmd_eng->handle_clink_evt)
+		esoc_clink->cmd_eng->handle_clink_evt(evt, esoc_clink->cmd_eng);
+	spin_unlock_irqrestore(&esoc_clink->notify_lock, flags);
+}
+EXPORT_SYMBOL(esoc_clink_evt_notify);
+
+void *get_esoc_clink_data(struct esoc_clink *esoc)
+{
+	return esoc->clink_data;
+}
+EXPORT_SYMBOL(get_esoc_clink_data);
+
+void set_esoc_clink_data(struct esoc_clink *esoc, void *data)
+{
+	esoc->clink_data = data;
+}
+EXPORT_SYMBOL(set_esoc_clink_data);
+
+void esoc_clink_queue_request(enum esoc_req req, struct esoc_clink *esoc_clink)
+{
+	unsigned long flags;
+	struct esoc_eng *req_eng;
+
+	spin_lock_irqsave(&esoc_clink->notify_lock, flags);
+	if (esoc_clink->req_eng != NULL) {
+		req_eng = esoc_clink->req_eng;
+		req_eng->handle_clink_req(req, req_eng);
+	}
+	spin_unlock_irqrestore(&esoc_clink->notify_lock, flags);
+}
+EXPORT_SYMBOL(esoc_clink_queue_request);
+
+void esoc_set_drv_data(struct esoc_clink *esoc_clink, void *data)
+{
+	dev_set_drvdata(&esoc_clink->dev, data);
+}
+EXPORT_SYMBOL(esoc_set_drv_data);
+
+void *esoc_get_drv_data(struct esoc_clink *esoc_clink)
+{
+	return dev_get_drvdata(&esoc_clink->dev);
+}
+EXPORT_SYMBOL(esoc_get_drv_data);
+
+/* bus registration functions */
+void esoc_clink_unregister(struct esoc_clink *esoc_clink)
+{
+	if (get_device(&esoc_clink->dev) != NULL) {
+		device_unregister(&esoc_clink->dev);
+		put_device(&esoc_clink->dev);
+	}
+}
+EXPORT_SYMBOL(esoc_clink_unregister);
+
+int esoc_clink_register(struct esoc_clink *esoc_clink)
+{
+	int id, err;
+	struct device *dev;
+
+	if (!esoc_clink->name || !esoc_clink->link_name ||
+					!esoc_clink->clink_ops) {
+		dev_err(esoc_clink->parent, "invalid esoc arguments\n");
+		return -EINVAL;
+	}
+	id = ida_simple_get(&esoc_ida, 0, ESOC_DEV_MAX, GFP_KERNEL);
+	if (id < 0) {
+		err = id;
+		goto exit_ida;
+	}
+	esoc_clink->id = id;
+	dev = &esoc_clink->dev;
+	dev->bus = &esoc_bus_type;
+	dev->release = esoc_clink_release;
+	if (!esoc_clink->parent)
+		dev->parent = &esoc_bus;
+	else
+		dev->parent = esoc_clink->parent;
+	dev_set_name(dev, "esoc%d", id);
+	err = device_register(dev);
+	if (err) {
+		dev_err(esoc_clink->parent, "esoc device register failed\n");
+		goto exit_ida;
+	}
+	spin_lock_init(&esoc_clink->notify_lock);
+	return 0;
+exit_ida:
+	ida_simple_remove(&esoc_ida, id);
+	pr_err("unable to register %s, err = %d\n", esoc_clink->name, err);
+	return err;
+}
+EXPORT_SYMBOL(esoc_clink_register);
+
+int esoc_clink_register_req_eng(struct esoc_clink *esoc_clink,
+						struct esoc_eng *eng)
+{
+	if (esoc_clink->req_eng)
+		return -EBUSY;
+	if (!eng->handle_clink_req)
+		return -EINVAL;
+	esoc_clink->req_eng = eng;
+	eng->esoc_clink = esoc_clink;
+	esoc_clink_evt_notify(ESOC_REQ_ENG_ON, esoc_clink);
+	return 0;
+}
+EXPORT_SYMBOL(esoc_clink_register_req_eng);
+
+int esoc_clink_register_cmd_eng(struct esoc_clink *esoc_clink,
+						struct esoc_eng *eng)
+{
+	if (esoc_clink->cmd_eng)
+		return -EBUSY;
+	esoc_clink->cmd_eng = eng;
+	eng->esoc_clink = esoc_clink;
+	esoc_clink_evt_notify(ESOC_CMD_ENG_ON, esoc_clink);
+	return 0;
+}
+EXPORT_SYMBOL(esoc_clink_register_cmd_eng);
+
+void esoc_clink_unregister_req_eng(struct esoc_clink *esoc_clink,
+						struct esoc_eng *eng)
+{
+	esoc_clink->req_eng = NULL;
+	esoc_clink_evt_notify(ESOC_REQ_ENG_OFF, esoc_clink);
+}
+EXPORT_SYMBOL(esoc_clink_unregister_req_eng);
+
+void esoc_clink_unregister_cmd_eng(struct esoc_clink *esoc_clink,
+						struct esoc_eng *eng)
+{
+	esoc_clink->cmd_eng = NULL;
+	esoc_clink_evt_notify(ESOC_CMD_ENG_OFF, esoc_clink);
+}
+EXPORT_SYMBOL(esoc_clink_unregister_cmd_eng);
+
+int esoc_drv_register(struct esoc_drv *driver)
+{
+	driver->driver.bus = &esoc_bus_type;
+	driver->driver.probe = esoc_bus_probe;
+	return driver_register(&driver->driver);
+}
+EXPORT_SYMBOL(esoc_drv_register);
+
+static int __init esoc_init(void)
+{
+	int ret;
+
+	ret = device_register(&esoc_bus);
+	if (ret) {
+		pr_err("esoc bus device register fail\n");
+		return ret;
+	}
+	ret = bus_register(&esoc_bus_type);
+	if (ret) {
+		pr_err("esoc bus register fail\n");
+		device_unregister(&esoc_bus);
+		return ret;
+	}
+	pr_debug("esoc bus registration done\n");
+	return 0;
+}
+
+subsys_initcall(esoc_init);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/esoc/esoc_client.c b/drivers/esoc/esoc_client.c
new file mode 100644
index 0000000..5b194e31
--- /dev/null
+++ b/drivers/esoc/esoc_client.c
@@ -0,0 +1,132 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/esoc_client.h>
+#include <linux/of.h>
+#include <linux/spinlock.h>
+#include "esoc.h"
+
+static DEFINE_SPINLOCK(notify_lock);
+static ATOMIC_NOTIFIER_HEAD(client_notify);
+
+static void devm_esoc_desc_release(struct device *dev, void *res)
+{
+	struct esoc_desc *esoc_desc = res;
+
+	kfree(esoc_desc->name);
+	kfree(esoc_desc->link);
+	put_esoc_clink(esoc_desc->priv);
+}
+
+static int devm_esoc_desc_match(struct device *dev, void *res, void *data)
+{
+	struct esoc_desc *esoc_desc = res;
+
+	return esoc_desc == data;
+}
+
+struct esoc_desc *devm_register_esoc_client(struct device *dev,
+							const char *name)
+{
+	int ret, index;
+	const char *client_desc;
+	char *esoc_prop;
+	const __be32 *parp;
+	struct device_node *esoc_node;
+	struct device_node *np = dev->of_node;
+	struct esoc_clink *esoc_clink;
+	struct esoc_desc *desc;
+	char *esoc_name, *esoc_link;
+
+	for (index = 0;; index++) {
+		esoc_prop = kasprintf(GFP_KERNEL, "esoc-%d", index);
+		parp = of_get_property(np, esoc_prop, NULL);
+		if (parp == NULL) {
+			dev_err(dev, "esoc device not present\n");
+			kfree(esoc_prop);
+			return NULL;
+		}
+		ret = of_property_read_string_index(np, "esoc-names", index,
+								&client_desc);
+		if (ret) {
+			dev_err(dev, "cannot find matching string\n");
+			kfree(esoc_prop);
+			return NULL;
+		}
+		if (strcmp(client_desc, name)) {
+			kfree(esoc_prop);
+			continue;
+		}
+		kfree(esoc_prop);
+		esoc_node = of_find_node_by_phandle(be32_to_cpup(parp));
+		esoc_clink = get_esoc_clink_by_node(esoc_node);
+		if (IS_ERR_OR_NULL(esoc_clink)) {
+			dev_err(dev, "matching esoc clink not present\n");
+			return ERR_PTR(-EPROBE_DEFER);
+		}
+		esoc_name = kasprintf(GFP_KERNEL, "esoc%d",
+							esoc_clink->id);
+		if (IS_ERR_OR_NULL(esoc_name)) {
+			dev_err(dev, "unable to allocate esoc name\n");
+			return ERR_PTR(-ENOMEM);
+		}
+		esoc_link = kasprintf(GFP_KERNEL, "%s", esoc_clink->link_name);
+		if (IS_ERR_OR_NULL(esoc_link)) {
+			dev_err(dev, "unable to allocate esoc link name\n");
+			kfree(esoc_name);
+			return ERR_PTR(-ENOMEM);
+		}
+		desc = devres_alloc(devm_esoc_desc_release,
+						sizeof(*desc), GFP_KERNEL);
+		if (IS_ERR_OR_NULL(desc)) {
+			kfree(esoc_name);
+			kfree(esoc_link);
+			dev_err(dev, "unable to allocate esoc descriptor\n");
+			return ERR_PTR(-ENOMEM);
+		}
+		desc->name = esoc_name;
+		desc->link = esoc_link;
+		desc->priv = esoc_clink;
+		devres_add(dev, desc);
+		return desc;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(devm_register_esoc_client);
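+
+/*
+ * Usage sketch (illustrative, assumes a client whose DT node carries
+ * "esoc-0" and "esoc-names" properties): the descriptor is devres
+ * managed, so no explicit unregistration is needed on driver detach.
+ * mdm_client_probe() and the "primary" name are hypothetical.
+ *
+ *	static int mdm_client_probe(struct platform_device *pdev)
+ *	{
+ *		struct esoc_desc *desc;
+ *
+ *		desc = devm_register_esoc_client(&pdev->dev, "primary");
+ *		if (IS_ERR_OR_NULL(desc))
+ *			return desc ? PTR_ERR(desc) : -ENODEV;
+ *		dev_info(&pdev->dev, "bound to %s over %s\n",
+ *			 desc->name, desc->link);
+ *		return 0;
+ *	}
+ */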
+
+void devm_unregister_esoc_client(struct device *dev,
+					struct esoc_desc *esoc_desc)
+{
+	int ret;
+
+	ret = devres_release(dev, devm_esoc_desc_release,
+				devm_esoc_desc_match, esoc_desc);
+	WARN_ON(ret);
+}
+EXPORT_SYMBOL(devm_unregister_esoc_client);
+
+int esoc_register_client_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&client_notify, nb);
+}
+EXPORT_SYMBOL(esoc_register_client_notifier);
+
+void notify_esoc_clients(struct esoc_clink *esoc_clink, unsigned long evt)
+{
+	unsigned int id;
+	unsigned long flags;
+
+	spin_lock_irqsave(&notify_lock, flags);
+	id = esoc_clink->id;
+	atomic_notifier_call_chain(&client_notify, evt, &id);
+	spin_unlock_irqrestore(&notify_lock, flags);
+}
+EXPORT_SYMBOL(notify_esoc_clients);
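+
+/*
+ * Notifier sketch (illustrative): clients that need to react to control
+ * link events register on the atomic chain; the callback runs in atomic
+ * context with the esoc id as the data pointer, so it must not sleep.
+ * mdm_evt_cb() is hypothetical.
+ *
+ *	static int mdm_evt_cb(struct notifier_block *nb,
+ *			      unsigned long evt, void *data)
+ *	{
+ *		unsigned int id = *(unsigned int *)data;
+ *
+ *		pr_debug("esoc%u: event %lu\n", id, evt);
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block mdm_evt_nb = {
+ *		.notifier_call = mdm_evt_cb,
+ *	};
+ *
+ *	esoc_register_client_notifier(&mdm_evt_nb);
+ */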
diff --git a/drivers/esoc/esoc_dev.c b/drivers/esoc/esoc_dev.c
new file mode 100644
index 0000000..17a30b8
--- /dev/null
+++ b/drivers/esoc/esoc_dev.c
@@ -0,0 +1,392 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kfifo.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include "esoc.h"
+
+/**
+ * struct esoc_udev: Userspace char interface
+ * @dev: interface device.
+ * @req_fifo: fifo for clink requests.
+ * @req_wait: signal availability of request from clink
+ * @req_fifo_lock: serialize access to req fifo
+ * @evt_fifo: fifo for clink events
+ * @evt_wait: signal availability of clink event
+ * @evt_fifo_lock: serialize access to event fifo
+ * @list: entry in esoc dev list.
+ * @clink: reference to control link
+ */
+struct esoc_udev {
+	struct device *dev;
+	struct kfifo req_fifo;
+	wait_queue_head_t req_wait;
+	spinlock_t req_fifo_lock;
+	struct kfifo evt_fifo;
+	wait_queue_head_t evt_wait;
+	spinlock_t evt_fifo_lock;
+	struct list_head list;
+	struct esoc_clink *clink;
+};
+
+/**
+ * struct esoc_uhandle: Userspace handle of esoc
+ * @esoc_clink: esoc control link.
+ * @eng: esoc engine for commands/requests.
+ * @esoc_udev: user interface device.
+ * @req_eng_reg: indicates if engine is registered as request eng
+ * @cmd_eng_reg: indicates if engine is registered as cmd eng
+ */
+struct esoc_uhandle {
+	struct esoc_clink *esoc_clink;
+	struct esoc_eng eng;
+	struct esoc_udev *esoc_udev;
+	bool req_eng_reg;
+	bool cmd_eng_reg;
+};
+
+#define ESOC_MAX_MINOR	256
+#define ESOC_MAX_REQ	8
+#define ESOC_MAX_EVT	4
+
+static LIST_HEAD(esoc_udev_list);
+static DEFINE_SPINLOCK(esoc_udev_list_lock);
+struct class *esoc_class;
+static int esoc_major;
+
+static struct esoc_udev *get_free_esoc_udev(struct esoc_clink *esoc_clink)
+{
+	struct esoc_udev *esoc_udev;
+	int err;
+
+	if (esoc_clink->id >= ESOC_MAX_MINOR) {
+		pr_err("too many esoc devices\n");
+		return ERR_PTR(-ENODEV);
+	}
+	esoc_udev = kzalloc(sizeof(*esoc_udev), GFP_KERNEL);
+	if (!esoc_udev)
+		return ERR_PTR(-ENOMEM);
+	err = kfifo_alloc(&esoc_udev->req_fifo, (sizeof(u32)) * ESOC_MAX_REQ,
+								GFP_KERNEL);
+	if (err) {
+		pr_err("unable to allocate request fifo for %s\n",
+							esoc_clink->name);
+		goto req_fifo_fail;
+	}
+	err = kfifo_alloc(&esoc_udev->evt_fifo, (sizeof(u32)) * ESOC_MAX_EVT,
+								GFP_KERNEL);
+	if (err) {
+		pr_err("unable to allocate evt fifo for %s\n",
+							esoc_clink->name);
+		goto evt_fifo_fail;
+	}
+	init_waitqueue_head(&esoc_udev->req_wait);
+	init_waitqueue_head(&esoc_udev->evt_wait);
+	spin_lock_init(&esoc_udev->req_fifo_lock);
+	spin_lock_init(&esoc_udev->evt_fifo_lock);
+	esoc_udev->clink = esoc_clink;
+	spin_lock(&esoc_udev_list_lock);
+	list_add_tail(&esoc_udev->list, &esoc_udev_list);
+	spin_unlock(&esoc_udev_list_lock);
+	return esoc_udev;
+evt_fifo_fail:
+	kfifo_free(&esoc_udev->req_fifo);
+req_fifo_fail:
+	kfree(esoc_udev);
+	return ERR_PTR(-ENODEV);
+}
+
+static void return_esoc_udev(struct esoc_udev *esoc_udev)
+{
+	spin_lock(&esoc_udev_list_lock);
+	list_del(&esoc_udev->list);
+	spin_unlock(&esoc_udev_list_lock);
+	kfifo_free(&esoc_udev->req_fifo);
+	kfifo_free(&esoc_udev->evt_fifo);
+	kfree(esoc_udev);
+}
+
+static struct esoc_udev *esoc_udev_get_by_minor(unsigned int index)
+{
+	struct esoc_udev *esoc_udev;
+
+	spin_lock(&esoc_udev_list_lock);
+	list_for_each_entry(esoc_udev, &esoc_udev_list, list) {
+		if (esoc_udev->clink->id == index)
+			goto found;
+	}
+	esoc_udev = NULL;
+found:
+	spin_unlock(&esoc_udev_list_lock);
+	return esoc_udev;
+}
+
+void esoc_udev_handle_clink_req(enum esoc_req req, struct esoc_eng *eng)
+{
+	int err;
+	u32 clink_req;
+	struct esoc_clink *esoc_clink = eng->esoc_clink;
+	struct esoc_udev *esoc_udev = esoc_udev_get_by_minor(esoc_clink->id);
+
+	if (!esoc_udev)
+		return;
+	clink_req = (u32)req;
+	err = kfifo_in_spinlocked(&esoc_udev->req_fifo, &clink_req,
+						sizeof(clink_req),
+						&esoc_udev->req_fifo_lock);
+	if (err != sizeof(clink_req)) {
+		pr_err("unable to queue request for %s\n", esoc_clink->name);
+		return;
+	}
+	wake_up_interruptible(&esoc_udev->req_wait);
+}
+
+void esoc_udev_handle_clink_evt(enum esoc_evt evt, struct esoc_eng *eng)
+{
+	int err;
+	u32 clink_evt;
+	struct esoc_clink *esoc_clink = eng->esoc_clink;
+	struct esoc_udev *esoc_udev = esoc_udev_get_by_minor(esoc_clink->id);
+
+	if (!esoc_udev)
+		return;
+	clink_evt = (u32)evt;
+	err = kfifo_in_spinlocked(&esoc_udev->evt_fifo, &clink_evt,
+						sizeof(clink_evt),
+						&esoc_udev->evt_fifo_lock);
+	if (err != sizeof(clink_evt)) {
+		pr_err("unable to queue event for %s\n", esoc_clink->name);
+		return;
+	}
+	wake_up_interruptible(&esoc_udev->evt_wait);
+}
+
+static long esoc_dev_ioctl(struct file *file, unsigned int cmd,
+						unsigned long arg)
+{
+	int err;
+	u32 esoc_cmd, status, req, evt;
+	struct esoc_uhandle *uhandle = file->private_data;
+	struct esoc_udev *esoc_udev = uhandle->esoc_udev;
+	struct esoc_clink *esoc_clink = uhandle->esoc_clink;
+	const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
+	void __user *uarg = (void __user *)arg;
+
+	switch (cmd) {
+	case ESOC_REG_REQ_ENG:
+		err = esoc_clink_register_req_eng(esoc_clink, &uhandle->eng);
+		if (err)
+			return err;
+		uhandle->req_eng_reg = true;
+		break;
+	case ESOC_REG_CMD_ENG:
+		err = esoc_clink_register_cmd_eng(esoc_clink, &uhandle->eng);
+		if (err)
+			return err;
+		uhandle->cmd_eng_reg = true;
+		break;
+	case ESOC_CMD_EXE:
+		if (esoc_clink->cmd_eng != &uhandle->eng)
+			return -EACCES;
+		if (get_user(esoc_cmd, (u32 __user *)arg))
+			return -EFAULT;
+		return clink_ops->cmd_exe(esoc_cmd, esoc_clink);
+	case ESOC_WAIT_FOR_REQ:
+		if (esoc_clink->req_eng != &uhandle->eng)
+			return -EACCES;
+		err = wait_event_interruptible(esoc_udev->req_wait,
+					!kfifo_is_empty(&esoc_udev->req_fifo));
+		if (!err) {
+			err = kfifo_out_spinlocked(&esoc_udev->req_fifo, &req,
+								sizeof(req),
+						&esoc_udev->req_fifo_lock);
+			if (err != sizeof(req)) {
+				pr_err("read from clink %s req q failed\n",
+							esoc_clink->name);
+				return -EIO;
+			}
+			if (put_user(req, (unsigned long __user *)uarg))
+				return -EFAULT;
+		}
+		return err;
+	case ESOC_NOTIFY:
+		if (get_user(esoc_cmd, (u32 __user *)arg))
+			return -EFAULT;
+		clink_ops->notify(esoc_cmd, esoc_clink);
+		break;
+	case ESOC_GET_STATUS:
+		err = clink_ops->get_status(&status, esoc_clink);
+		if (err)
+			return err;
+		if (put_user(status, (unsigned long __user *)uarg))
+			return -EFAULT;
+		break;
+	case ESOC_WAIT_FOR_CRASH:
+		err = wait_event_interruptible(esoc_udev->evt_wait,
+					!kfifo_is_empty(&esoc_udev->evt_fifo));
+		if (!err) {
+			err = kfifo_out_spinlocked(&esoc_udev->evt_fifo, &evt,
+								sizeof(evt),
+						&esoc_udev->evt_fifo_lock);
+			if (err != sizeof(evt)) {
+				pr_err("read from clink %s evt q failed\n",
+							esoc_clink->name);
+				return -EIO;
+			}
+			if (put_user(evt, (unsigned long __user *)uarg))
+				return -EFAULT;
+		}
+		return err;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
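+
+/*
+ * Userspace flow sketch (illustrative): a helper daemon opens the
+ * /dev/esoc-<id> node, claims the request-engine role, then blocks in
+ * ESOC_WAIT_FOR_REQ until the control link posts a request. The buffer
+ * is an unsigned long to match the put_user() width above; error
+ * handling is elided and handle_request() is hypothetical.
+ *
+ *	int fd = open("/dev/esoc-0", O_RDWR);
+ *
+ *	ioctl(fd, ESOC_REG_REQ_ENG);
+ *	for (;;) {
+ *		unsigned long req;
+ *
+ *		if (ioctl(fd, ESOC_WAIT_FOR_REQ, &req) < 0)
+ *			break;
+ *		handle_request(req);
+ *	}
+ */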
+
+static int esoc_dev_open(struct inode *inode, struct file *file)
+{
+	struct esoc_uhandle *uhandle;
+	struct esoc_udev *esoc_udev;
+	struct esoc_clink *esoc_clink;
+	struct esoc_eng *eng;
+	unsigned int minor = iminor(inode);
+
+	esoc_udev = esoc_udev_get_by_minor(minor);
+	if (!esoc_udev)
+		return -ENODEV;
+	esoc_clink = get_esoc_clink(esoc_udev->clink->id);
+
+	uhandle = kzalloc(sizeof(*uhandle), GFP_KERNEL);
+	if (!uhandle) {
+		put_esoc_clink(esoc_clink);
+		return -ENOMEM;
+	}
+	uhandle->esoc_udev = esoc_udev;
+	uhandle->esoc_clink = esoc_clink;
+	eng = &uhandle->eng;
+	eng->handle_clink_req = esoc_udev_handle_clink_req;
+	eng->handle_clink_evt = esoc_udev_handle_clink_evt;
+	file->private_data = uhandle;
+	return 0;
+}
+
+static int esoc_dev_release(struct inode *inode, struct file *file)
+{
+	struct esoc_clink *esoc_clink;
+	struct esoc_uhandle *uhandle = file->private_data;
+
+	esoc_clink = uhandle->esoc_clink;
+	if (uhandle->req_eng_reg)
+		esoc_clink_unregister_req_eng(esoc_clink, &uhandle->eng);
+	if (uhandle->cmd_eng_reg)
+		esoc_clink_unregister_cmd_eng(esoc_clink, &uhandle->eng);
+	uhandle->req_eng_reg = false;
+	uhandle->cmd_eng_reg = false;
+	put_esoc_clink(esoc_clink);
+	kfree(uhandle);
+	return 0;
+}
+
+static const struct file_operations esoc_dev_fops = {
+	.owner		= THIS_MODULE,
+	.open		= esoc_dev_open,
+	.unlocked_ioctl = esoc_dev_ioctl,
+	.release	= esoc_dev_release,
+};
+
+int esoc_clink_add_device(struct device *dev, void *dummy)
+{
+	struct esoc_udev *esoc_udev;
+	struct esoc_clink *esoc_clink = to_esoc_clink(dev);
+
+	esoc_udev = get_free_esoc_udev(esoc_clink);
+	if (IS_ERR(esoc_udev))
+		return PTR_ERR(esoc_udev);
+	esoc_udev->dev = device_create(esoc_class, &esoc_clink->dev,
+					MKDEV(esoc_major, esoc_clink->id),
+					esoc_clink, "esoc-%d", esoc_clink->id);
+	if (IS_ERR(esoc_udev->dev)) {
+		pr_err("failed to create user device\n");
+		goto dev_err;
+	}
+	return 0;
+dev_err:
+	return_esoc_udev(esoc_udev);
+	return -ENODEV;
+}
+
+int esoc_clink_del_device(struct device *dev, void *dummy)
+{
+	struct esoc_udev *esoc_udev;
+	struct esoc_clink *esoc_clink = to_esoc_clink(dev);
+
+	esoc_udev = esoc_udev_get_by_minor(esoc_clink->id);
+	if (!esoc_udev)
+		return 0;
+	device_destroy(esoc_class, MKDEV(esoc_major, esoc_clink->id));
+	return_esoc_udev(esoc_udev);
+	return 0;
+}
+
+static int esoc_dev_notifier_call(struct notifier_block *nb,
+					unsigned long action,
+					void *data)
+{
+	struct device *dev = data;
+
+	switch (action) {
+	case BUS_NOTIFY_ADD_DEVICE:
+		return esoc_clink_add_device(dev, NULL);
+	case BUS_NOTIFY_DEL_DEVICE:
+		return esoc_clink_del_device(dev, NULL);
+	}
+	return 0;
+}
+
+static struct notifier_block esoc_dev_notifier = {
+	.notifier_call = esoc_dev_notifier_call,
+};
+
+int __init esoc_dev_init(void)
+{
+	int ret = 0;
+
+	esoc_class = class_create(THIS_MODULE, "esoc-dev");
+
+	if (IS_ERR(esoc_class)) {
+		pr_err("coudn't create class");
+		return PTR_ERR(esoc_class);
+	}
+	esoc_major = register_chrdev(0, "esoc", &esoc_dev_fops);
+	if (esoc_major < 0) {
+		pr_err("failed to allocate char dev\n");
+		ret = esoc_major;
+		goto class_unreg;
+	}
+	ret = bus_register_notifier(&esoc_bus_type, &esoc_dev_notifier);
+	if (ret)
+		goto chrdev_unreg;
+	esoc_for_each_dev(NULL, esoc_clink_add_device);
+	return ret;
+chrdev_unreg:
+	unregister_chrdev(esoc_major, "esoc");
+class_unreg:
+	class_destroy(esoc_class);
+	return ret;
+}
+
+void __exit esoc_dev_exit(void)
+{
+	bus_unregister_notifier(&esoc_bus_type, &esoc_dev_notifier);
+	class_destroy(esoc_class);
+	unregister_chrdev(esoc_major, "esoc");
+}
+
+MODULE_LICENSE("GPL v2");
+module_init(esoc_dev_init);
+module_exit(esoc_dev_exit);
diff --git a/drivers/esoc/mdm-dbg.h b/drivers/esoc/mdm-dbg.h
new file mode 100644
index 0000000..ae31339
--- /dev/null
+++ b/drivers/esoc/mdm-dbg.h
@@ -0,0 +1,54 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+static bool debug_init_done;
+
+#ifndef CONFIG_ESOC_MDM_DBG_ENG
+
+static inline bool dbg_check_cmd_mask(unsigned int cmd)
+{
+	return false;
+}
+
+static inline bool dbg_check_notify_mask(unsigned int notify)
+{
+	return false;
+}
+
+static inline int mdm_dbg_eng_init(struct esoc_drv *drv)
+{
+	return 0;
+}
+
+#else
+extern bool dbg_check_cmd_mask(unsigned int cmd);
+extern bool dbg_check_notify_mask(unsigned int notify);
+extern int mdm_dbg_eng_init(struct esoc_drv *drv);
+#endif
+
+static inline bool mdm_dbg_stall_cmd(unsigned int cmd)
+{
+	if (debug_init_done)
+		return dbg_check_cmd_mask(cmd);
+	else
+		return false;
+}
+
+static inline bool mdm_dbg_stall_notify(unsigned int notify)
+{
+	if (debug_init_done)
+		return dbg_check_notify_mask(notify);
+	else
+		return false;
+}
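+
+/*
+ * Typical use (sketch): a control link's cmd_exe() implementation can
+ * consult these helpers before touching hardware, so a debug engine
+ * built with CONFIG_ESOC_MDM_DBG_ENG can stall selected commands; with
+ * the config disabled both helpers compile away to "false".
+ * mdm_cmd_exe() and do_exe() are hypothetical.
+ *
+ *	static int mdm_cmd_exe(unsigned int cmd, struct esoc_clink *clink)
+ *	{
+ *		if (mdm_dbg_stall_cmd(cmd))
+ *			return -EBUSY;
+ *		return do_exe(cmd, clink);
+ *	}
+ */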
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d011cb8..ed37e59 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -22,10 +22,6 @@
 
 if GPIOLIB
 
-config GPIO_DEVRES
-	def_bool y
-	depends on HAS_IOMEM
-
 config OF_GPIO
 	def_bool y
 	depends on OF
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 3dd1965..1b08983 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -2,7 +2,7 @@
 
 ccflags-$(CONFIG_DEBUG_GPIO)	+= -DDEBUG
 
-obj-$(CONFIG_GPIO_DEVRES)	+= devres.o
+obj-$(CONFIG_GPIOLIB)		+= devres.o
 obj-$(CONFIG_GPIOLIB)		+= gpiolib.o
 obj-$(CONFIG_GPIOLIB)		+= gpiolib-legacy.o
 obj-$(CONFIG_OF_GPIO)		+= gpiolib-of.o
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index e422568..fe731f0 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -372,14 +372,15 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
 
 	bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
 
-	memcpy(reg_val, chip->reg_output, NBANK(chip));
 	mutex_lock(&chip->i2c_lock);
+	memcpy(reg_val, chip->reg_output, NBANK(chip));
 	for (bank = 0; bank < NBANK(chip); bank++) {
 		bank_mask = mask[bank / sizeof(*mask)] >>
 			   ((bank % sizeof(*mask)) * 8);
 		if (bank_mask) {
 			bank_val = bits[bank / sizeof(*bits)] >>
 				  ((bank % sizeof(*bits)) * 8);
+			bank_val &= bank_mask;
 			reg_val[bank] = (reg_val[bank] & ~bank_mask) | bank_val;
 		}
 	}
@@ -607,7 +608,6 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
 
 	if (client->irq && irq_base != -1
 			&& (chip->driver_data & PCA_INT)) {
-
 		ret = pca953x_read_regs(chip,
 					chip->regs->input, chip->irq_stat);
 		if (ret)
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 5a5a6cb..d6e21f1 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -97,7 +97,7 @@ static int tc3589x_gpio_get_direction(struct gpio_chip *chip,
 	if (ret < 0)
 		return ret;
 
-	return !!(ret & BIT(pos));
+	return !(ret & BIT(pos));
 }
 
 static int tc3589x_gpio_set_single_ended(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 93ed0e0..868128a 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2737,8 +2737,11 @@ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
 	if (IS_ERR(desc))
 		return PTR_ERR(desc);
 
-	/* Flush direction if something changed behind our back */
-	if (chip->get_direction) {
+	/*
+	 * If it's fast: flush the direction setting if something changed
+	 * behind our back
+	 */
+	if (!chip->can_sleep && chip->get_direction) {
 		int dir = chip->get_direction(chip, offset);
 
 		if (dir)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 039b57e..496f72b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -459,6 +459,7 @@ struct amdgpu_bo {
 	u64				metadata_flags;
 	void				*metadata;
 	u32				metadata_size;
+	unsigned			prime_shared_count;
 	/* list of all virtual address to which this bo
 	 * is associated to
 	 */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 651115d..c02db01f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -132,7 +132,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
 		entry->priority = min(info[i].bo_priority,
 				      AMDGPU_BO_LIST_MAX_PRIORITY);
 		entry->tv.bo = &entry->robj->tbo;
-		entry->tv.shared = true;
+		entry->tv.shared = !entry->robj->prime_shared_count;
 
 		if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
 			gds_obj = entry->robj;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7ca07e7..3161d77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -658,12 +658,10 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
 		return false;
 
 	if (amdgpu_passthrough(adev)) {
-		/* for FIJI: In whole GPU pass-through virtualization case
-		 * old smc fw won't clear some registers (e.g. MEM_SIZE, BIOS_SCRATCH)
-		 * so amdgpu_card_posted return false and driver will incorrectly skip vPost.
-		 * but if we force vPost do in pass-through case, the driver reload will hang.
-		 * whether doing vPost depends on amdgpu_card_posted if smc version is above
-		 * 00160e00 for FIJI.
+		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
+		 * some old smc fw still need driver do vPost otherwise gpu hang, while
+		 * those smc fw version above 22.15 doesn't have this flaw, so we force
+		 * vpost executed for smc version below 22.15
 		 */
 		if (adev->asic_type == CHIP_FIJI) {
 			int err;
@@ -674,22 +672,11 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
 				return true;
 
 			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
-			if (fw_ver >= 0x00160e00)
-				return !amdgpu_card_posted(adev);
+			if (fw_ver < 0x00160e00)
+				return true;
 		}
-	} else {
-		/* in bare-metal case, amdgpu_card_posted return false
-		 * after system reboot/boot, and return true if driver
-		 * reloaded.
-		 * we shouldn't do vPost after driver reload otherwise GPU
-		 * could hang.
-		 */
-		if (amdgpu_card_posted(adev))
-			return false;
 	}
-
-	/* we assume vPost is neede for all other cases */
-	return true;
+	return !amdgpu_card_posted(adev);
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 7700dc2..3826d5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -74,20 +74,36 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 	if (ret)
 		return ERR_PTR(ret);
 
+	bo->prime_shared_count = 1;
 	return &bo->gem_base;
 }
 
 int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
 {
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-	int ret = 0;
+	long ret = 0;
 
 	ret = amdgpu_bo_reserve(bo, false);
 	if (unlikely(ret != 0))
 		return ret;
 
+	/*
+	 * Wait for all shared fences to complete before we switch to future
+	 * use of exclusive fence on this prime shared bo.
+	 */
+	ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
+						  MAX_SCHEDULE_TIMEOUT);
+	if (unlikely(ret < 0)) {
+		DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret);
+		amdgpu_bo_unreserve(bo);
+		return ret;
+	}
+
 	/* pin buffer into GTT */
 	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+	if (likely(ret == 0))
+		bo->prime_shared_count++;
+
 	amdgpu_bo_unreserve(bo);
 	return ret;
 }
@@ -102,6 +118,8 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
 		return;
 
 	amdgpu_bo_unpin(bo);
+	if (bo->prime_shared_count)
+		bo->prime_shared_count--;
 	amdgpu_bo_unreserve(bo);
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index b0c929dd..13f2b70 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -1469,8 +1469,6 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
 						table_info->vddgfx_lookup_table, vv_id, &sclk)) {
 				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 							PHM_PlatformCaps_ClockStretcher)) {
-					if (table_info == NULL)
-						return -EINVAL;
 					sclk_table = table_info->vdd_dep_on_sclk;
 
 					for (j = 1; j < sclk_table->count; j++) {
diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c
index b7a8b2a..b69c66b 100644
--- a/drivers/gpu/drm/arc/arcpgu_hdmi.c
+++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c
@@ -14,170 +14,45 @@
  *
  */
 
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_crtc.h>
 #include <drm/drm_encoder_slave.h>
-#include <drm/drm_atomic_helper.h>
 
 #include "arcpgu.h"
 
-struct arcpgu_drm_connector {
-	struct drm_connector connector;
-	struct drm_encoder_slave *encoder_slave;
-};
-
-static int arcpgu_drm_connector_get_modes(struct drm_connector *connector)
-{
-	const struct drm_encoder_slave_funcs *sfuncs;
-	struct drm_encoder_slave *slave;
-	struct arcpgu_drm_connector *con =
-		container_of(connector, struct arcpgu_drm_connector, connector);
-
-	slave = con->encoder_slave;
-	if (slave == NULL) {
-		dev_err(connector->dev->dev,
-			"connector_get_modes: cannot find slave encoder for connector\n");
-		return 0;
-	}
-
-	sfuncs = slave->slave_funcs;
-	if (sfuncs->get_modes == NULL)
-		return 0;
-
-	return sfuncs->get_modes(&slave->base, connector);
-}
-
-static enum drm_connector_status
-arcpgu_drm_connector_detect(struct drm_connector *connector, bool force)
-{
-	enum drm_connector_status status = connector_status_unknown;
-	const struct drm_encoder_slave_funcs *sfuncs;
-	struct drm_encoder_slave *slave;
-
-	struct arcpgu_drm_connector *con =
-		container_of(connector, struct arcpgu_drm_connector, connector);
-
-	slave = con->encoder_slave;
-	if (slave == NULL) {
-		dev_err(connector->dev->dev,
-			"connector_detect: cannot find slave encoder for connector\n");
-		return status;
-	}
-
-	sfuncs = slave->slave_funcs;
-	if (sfuncs && sfuncs->detect)
-		return sfuncs->detect(&slave->base, connector);
-
-	dev_err(connector->dev->dev, "connector_detect: could not detect slave funcs\n");
-	return status;
-}
-
-static void arcpgu_drm_connector_destroy(struct drm_connector *connector)
-{
-	drm_connector_unregister(connector);
-	drm_connector_cleanup(connector);
-}
-
-static const struct drm_connector_helper_funcs
-arcpgu_drm_connector_helper_funcs = {
-	.get_modes = arcpgu_drm_connector_get_modes,
-};
-
-static const struct drm_connector_funcs arcpgu_drm_connector_funcs = {
-	.dpms = drm_helper_connector_dpms,
-	.reset = drm_atomic_helper_connector_reset,
-	.detect = arcpgu_drm_connector_detect,
-	.fill_modes = drm_helper_probe_single_connector_modes,
-	.destroy = arcpgu_drm_connector_destroy,
-	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
-	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static struct drm_encoder_helper_funcs arcpgu_drm_encoder_helper_funcs = {
-	.dpms = drm_i2c_encoder_dpms,
-	.mode_fixup = drm_i2c_encoder_mode_fixup,
-	.mode_set = drm_i2c_encoder_mode_set,
-	.prepare = drm_i2c_encoder_prepare,
-	.commit = drm_i2c_encoder_commit,
-	.detect = drm_i2c_encoder_detect,
-};
-
 static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = {
 	.destroy = drm_encoder_cleanup,
 };
 
 int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np)
 {
-	struct arcpgu_drm_connector *arcpgu_connector;
-	struct drm_i2c_encoder_driver *driver;
-	struct drm_encoder_slave *encoder;
-	struct drm_connector *connector;
-	struct i2c_client *i2c_slave;
-	int ret;
+	struct drm_encoder *encoder;
+	struct drm_bridge *bridge;
+
+	int ret = 0;
 
 	encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL);
 	if (encoder == NULL)
 		return -ENOMEM;
 
-	i2c_slave = of_find_i2c_device_by_node(np);
-	if (!i2c_slave || !i2c_get_clientdata(i2c_slave)) {
-		dev_err(drm->dev, "failed to find i2c slave encoder\n");
+	/* Locate drm bridge from the hdmi encoder DT node */
+	bridge = of_drm_find_bridge(np);
+	if (!bridge)
 		return -EPROBE_DEFER;
-	}
 
-	if (i2c_slave->dev.driver == NULL) {
-		dev_err(drm->dev, "failed to find i2c slave driver\n");
-		return -EPROBE_DEFER;
-	}
-
-	driver =
-	    to_drm_i2c_encoder_driver(to_i2c_driver(i2c_slave->dev.driver));
-	ret = driver->encoder_init(i2c_slave, drm, encoder);
-	if (ret) {
-		dev_err(drm->dev, "failed to initialize i2c encoder slave\n");
-		return ret;
-	}
-
-	encoder->base.possible_crtcs = 1;
-	encoder->base.possible_clones = 0;
-	ret = drm_encoder_init(drm, &encoder->base, &arcpgu_drm_encoder_funcs,
+	encoder->possible_crtcs = 1;
+	encoder->possible_clones = 0;
+	ret = drm_encoder_init(drm, encoder, &arcpgu_drm_encoder_funcs,
 			       DRM_MODE_ENCODER_TMDS, NULL);
 	if (ret)
 		return ret;
 
-	drm_encoder_helper_add(&encoder->base,
-			       &arcpgu_drm_encoder_helper_funcs);
+	/* Link drm_bridge to encoder */
+	bridge->encoder = encoder;
+	encoder->bridge = bridge;
 
-	arcpgu_connector = devm_kzalloc(drm->dev, sizeof(*arcpgu_connector),
-					GFP_KERNEL);
-	if (!arcpgu_connector) {
-		ret = -ENOMEM;
-		goto error_encoder_cleanup;
-	}
+	ret = drm_bridge_attach(drm, bridge);
+	if (ret)
+		drm_encoder_cleanup(encoder);
 
-	connector = &arcpgu_connector->connector;
-	drm_connector_helper_add(connector, &arcpgu_drm_connector_helper_funcs);
-	ret = drm_connector_init(drm, connector, &arcpgu_drm_connector_funcs,
-			DRM_MODE_CONNECTOR_HDMIA);
-	if (ret < 0) {
-		dev_err(drm->dev, "failed to initialize drm connector\n");
-		goto error_encoder_cleanup;
-	}
-
-	ret = drm_mode_connector_attach_encoder(connector, &encoder->base);
-	if (ret < 0) {
-		dev_err(drm->dev, "could not attach connector to encoder\n");
-		drm_connector_unregister(connector);
-		goto error_connector_cleanup;
-	}
-
-	arcpgu_connector->encoder_slave = encoder;
-
-	return 0;
-
-error_connector_cleanup:
-	drm_connector_cleanup(connector);
-
-error_encoder_cleanup:
-	drm_encoder_cleanup(&encoder->base);
 	return ret;
 }
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index b2d5e18..deb5743 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -25,8 +25,13 @@
 static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
 					  struct drm_crtc_state *old_crtc_state)
 {
+	struct drm_device *dev = crtc->dev;
+	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
 	struct drm_pending_vblank_event *event = crtc->state->event;
 
+	regmap_write(fsl_dev->regmap,
+		     DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG);
+
 	if (event) {
 		crtc->state->event = NULL;
 
@@ -39,11 +44,15 @@ static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
 	}
 }
 
-static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
+static void fsl_dcu_drm_crtc_atomic_disable(struct drm_crtc *crtc,
+					struct drm_crtc_state *old_crtc_state)
 {
 	struct drm_device *dev = crtc->dev;
 	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
 
+	/* always disable planes on the CRTC */
+	drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true);
+
 	drm_crtc_vblank_off(crtc);
 
 	regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
@@ -122,8 +131,8 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
 }
 
 static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = {
+	.atomic_disable = fsl_dcu_drm_crtc_atomic_disable,
 	.atomic_flush = fsl_dcu_drm_crtc_atomic_flush,
-	.disable = fsl_dcu_drm_disable_crtc,
 	.enable = fsl_dcu_drm_crtc_enable,
 	.mode_set_nofb = fsl_dcu_drm_crtc_mode_set_nofb,
 };
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index e04efbe..cc2fde2 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -59,8 +59,6 @@ static int fsl_dcu_drm_irq_init(struct drm_device *dev)
 
 	regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0);
 	regmap_write(fsl_dev->regmap, DCU_INT_MASK, ~0);
-	regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
-		     DCU_UPDATE_MODE_READREG);
 
 	return ret;
 }
@@ -139,8 +137,6 @@ static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg)
 		drm_handle_vblank(dev, 0);
 
 	regmap_write(fsl_dev->regmap, DCU_INT_STATUS, int_status);
-	regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
-		     DCU_UPDATE_MODE_READREG);
 
 	return IRQ_HANDLED;
 }
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 9e6f7d8..a99f488 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -160,11 +160,6 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
 			     DCU_LAYER_POST_SKIP(0) |
 			     DCU_LAYER_PRE_SKIP(0));
 	}
-	regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
-			   DCU_MODE_DCU_MODE_MASK,
-			   DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
-	regmap_write(fsl_dev->regmap,
-		     DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG);
 
 	return;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 7adb4c7..a218c2e 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1281,6 +1281,12 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 	return ctx;
 }
 
+static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+	return !(obj->cache_level == I915_CACHE_NONE ||
+		 obj->cache_level == I915_CACHE_WT);
+}
+
 void i915_vma_move_to_active(struct i915_vma *vma,
 			     struct drm_i915_gem_request *req,
 			     unsigned int flags)
@@ -1311,6 +1317,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 
 		/* update for the implicit flush after a batch */
 		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+		if (!obj->cache_dirty && gpu_write_needs_clflush(obj))
+			obj->cache_dirty = true;
 	}
 
 	if (flags & EXEC_OBJECT_NEEDS_FENCE)
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 1f8af87..cf25607 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1143,7 +1143,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 	if (!child)
 		return;
 
-	aux_channel = child->raw[25];
+	aux_channel = child->common.aux_channel;
 	ddc_pin = child->common.ddc_pin;
 
 	is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
@@ -1673,7 +1673,8 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
 	return false;
 }
 
-bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port)
+static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child,
+				      enum port port)
 {
 	static const struct {
 		u16 dp, hdmi;
@@ -1687,22 +1688,35 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum por
 		[PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
 		[PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
 	};
-	int i;
 
 	if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
 		return false;
 
-	if (!dev_priv->vbt.child_dev_num)
+	if ((p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) !=
+	    (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
 		return false;
 
+	if (p_child->common.dvo_port == port_mapping[port].dp)
+		return true;
+
+	/* Only accept a HDMI dvo_port as DP++ if it has an AUX channel */
+	if (p_child->common.dvo_port == port_mapping[port].hdmi &&
+	    p_child->common.aux_channel != 0)
+		return true;
+
+	return false;
+}
+
+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
+				     enum port port)
+{
+	int i;
+
 	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
 		const union child_device_config *p_child =
 			&dev_priv->vbt.child_dev[i];
 
-		if ((p_child->common.dvo_port == port_mapping[port].dp ||
-		     p_child->common.dvo_port == port_mapping[port].hdmi) &&
-		    (p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) ==
-		    (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
+		if (child_dev_is_dp_dual_mode(p_child, port))
 			return true;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 3581b5a..bf344d0 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -4463,21 +4463,11 @@ static enum drm_connector_status
 intel_dp_detect(struct drm_connector *connector, bool force)
 {
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct intel_encoder *intel_encoder = &intel_dig_port->base;
 	enum drm_connector_status status = connector->status;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
 		      connector->base.id, connector->name);
 
-	if (intel_dp->is_mst) {
-		/* MST devices are disconnected from a monitor POV */
-		intel_dp_unset_edid(intel_dp);
-		if (intel_encoder->type != INTEL_OUTPUT_EDP)
-			intel_encoder->type = INTEL_OUTPUT_DP;
-		return connector_status_disconnected;
-	}
-
 	/* If full detect is not performed yet, do a full detect */
 	if (!intel_dp->detect_done)
 		status = intel_dp_long_pulse(intel_dp->attached_connector);
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 73a521f..dbed12c 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -358,7 +358,7 @@ vlv_update_plane(struct drm_plane *dplane,
 	int plane = intel_plane->plane;
 	u32 sprctl;
 	u32 sprsurf_offset, linear_offset;
-	unsigned int rotation = dplane->state->rotation;
+	unsigned int rotation = plane_state->base.rotation;
 	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
 	int crtc_x = plane_state->base.dst.x1;
 	int crtc_y = plane_state->base.dst.y1;
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index 68db962..8886cab1 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -280,7 +280,8 @@ struct common_child_dev_config {
 	u8 dp_support:1;
 	u8 tmds_support:1;
 	u8 support_reserved:5;
-	u8 not_common3[12];
+	u8 aux_channel;
+	u8 not_common3[11];
 	u8 iboost_level;
 } __packed;
 
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 019b7ca..f75c5b5 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -80,6 +80,7 @@ static void mtk_ovl_enable_vblank(struct mtk_ddp_comp *comp,
 						 ddp_comp);
 
 	priv->crtc = crtc;
+	writel(0x0, comp->regs + DISP_REG_OVL_INTSTA);
 	writel_relaxed(OVL_FME_CPL_INT, comp->regs + DISP_REG_OVL_INTEN);
 }
 
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 0186e50..90fb831 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -432,11 +432,16 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
 	unsigned long pll_rate;
 	unsigned int factor;
 
+	/* Keep pll_rate within the valid TVDPLL range (1 GHz to 2 GHz) */
 	pix_rate = 1000UL * mode->clock;
-	if (mode->clock <= 74000)
+	if (mode->clock <= 27000)
+		factor = 16 * 3;
+	else if (mode->clock <= 84000)
 		factor = 8 * 3;
-	else
+	else if (mode->clock <= 167000)
 		factor = 4 * 3;
+	else
+		factor = 2 * 3;
 	pll_rate = pix_rate * factor;
 
 	dev_dbg(dpi->dev, "Want PLL %lu Hz, pixel clock %lu Hz\n",
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 71227de..0e8c4d9 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1133,12 +1133,6 @@ static int mtk_hdmi_output_set_display_mode(struct mtk_hdmi *hdmi,
 	phy_power_on(hdmi->phy);
 	mtk_hdmi_aud_output_config(hdmi, mode);
 
-	mtk_hdmi_setup_audio_infoframe(hdmi);
-	mtk_hdmi_setup_avi_infoframe(hdmi, mode);
-	mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI");
-	if (mode->flags & DRM_MODE_FLAG_3D_MASK)
-		mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode);
-
 	mtk_hdmi_hw_vid_black(hdmi, false);
 	mtk_hdmi_hw_aud_unmute(hdmi);
 	mtk_hdmi_hw_send_av_unmute(hdmi);
@@ -1401,6 +1395,16 @@ static void mtk_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
 	hdmi->powered = true;
 }
 
+static void mtk_hdmi_send_infoframe(struct mtk_hdmi *hdmi,
+				    struct drm_display_mode *mode)
+{
+	mtk_hdmi_setup_audio_infoframe(hdmi);
+	mtk_hdmi_setup_avi_infoframe(hdmi, mode);
+	mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI");
+	if (mode->flags & DRM_MODE_FLAG_3D_MASK)
+		mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode);
+}
+
 static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge)
 {
 	struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
@@ -1409,6 +1413,7 @@ static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge)
 	clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
 	clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
 	phy_power_on(hdmi->phy);
+	mtk_hdmi_send_infoframe(hdmi, &hdmi->mode);
 
 	hdmi->enabled = true;
 }
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
index 8a24754..51cb9cf 100644
--- a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
+++ b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
@@ -265,6 +265,9 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
 	struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
 	unsigned int pre_div;
 	unsigned int div;
+	unsigned int pre_ibias;
+	unsigned int hdmi_ibias;
+	unsigned int imp_en;
 
 	dev_dbg(hdmi_phy->dev, "%s: %lu Hz, parent: %lu Hz\n", __func__,
 		rate, parent_rate);
@@ -298,18 +301,31 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
 			  (0x1 << PLL_BR_SHIFT),
 			  RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC |
 			  RG_HDMITX_PLL_BR);
-	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_PRD_IMP_EN);
+	if (rate < 165000000) {
+		mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3,
+					RG_HDMITX_PRD_IMP_EN);
+		pre_ibias = 0x3;
+		imp_en = 0x0;
+		hdmi_ibias = hdmi_phy->ibias;
+	} else {
+		mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON3,
+				      RG_HDMITX_PRD_IMP_EN);
+		pre_ibias = 0x6;
+		imp_en = 0xf;
+		hdmi_ibias = hdmi_phy->ibias_up;
+	}
 	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4,
-			  (0x3 << PRD_IBIAS_CLK_SHIFT) |
-			  (0x3 << PRD_IBIAS_D2_SHIFT) |
-			  (0x3 << PRD_IBIAS_D1_SHIFT) |
-			  (0x3 << PRD_IBIAS_D0_SHIFT),
+			  (pre_ibias << PRD_IBIAS_CLK_SHIFT) |
+			  (pre_ibias << PRD_IBIAS_D2_SHIFT) |
+			  (pre_ibias << PRD_IBIAS_D1_SHIFT) |
+			  (pre_ibias << PRD_IBIAS_D0_SHIFT),
 			  RG_HDMITX_PRD_IBIAS_CLK |
 			  RG_HDMITX_PRD_IBIAS_D2 |
 			  RG_HDMITX_PRD_IBIAS_D1 |
 			  RG_HDMITX_PRD_IBIAS_D0);
 	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON3,
-			  (0x0 << DRV_IMP_EN_SHIFT), RG_HDMITX_DRV_IMP_EN);
+			  (imp_en << DRV_IMP_EN_SHIFT),
+			  RG_HDMITX_DRV_IMP_EN);
 	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6,
 			  (hdmi_phy->drv_imp_clk << DRV_IMP_CLK_SHIFT) |
 			  (hdmi_phy->drv_imp_d2 << DRV_IMP_D2_SHIFT) |
@@ -318,12 +334,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
 			  RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 |
 			  RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0);
 	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON5,
-			  (hdmi_phy->ibias << DRV_IBIAS_CLK_SHIFT) |
-			  (hdmi_phy->ibias << DRV_IBIAS_D2_SHIFT) |
-			  (hdmi_phy->ibias << DRV_IBIAS_D1_SHIFT) |
-			  (hdmi_phy->ibias << DRV_IBIAS_D0_SHIFT),
-			  RG_HDMITX_DRV_IBIAS_CLK | RG_HDMITX_DRV_IBIAS_D2 |
-			  RG_HDMITX_DRV_IBIAS_D1 | RG_HDMITX_DRV_IBIAS_D0);
+			  (hdmi_ibias << DRV_IBIAS_CLK_SHIFT) |
+			  (hdmi_ibias << DRV_IBIAS_D2_SHIFT) |
+			  (hdmi_ibias << DRV_IBIAS_D1_SHIFT) |
+			  (hdmi_ibias << DRV_IBIAS_D0_SHIFT),
+			  RG_HDMITX_DRV_IBIAS_CLK |
+			  RG_HDMITX_DRV_IBIAS_D2 |
+			  RG_HDMITX_DRV_IBIAS_D1 |
+			  RG_HDMITX_DRV_IBIAS_D0);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
new file mode 100644
index 0000000..114998f
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "msm-dsi-catalog:[%s] " fmt, __func__
+#include <linux/errno.h>
+
+#include "dsi_catalog.h"
+
+/**
+ * dsi_catalog_14_init() - catalog init for dsi controller v1.4
+ */
+static void dsi_catalog_14_init(struct dsi_ctrl_hw *ctrl)
+{
+	ctrl->ops.host_setup             = dsi_ctrl_hw_14_host_setup;
+	ctrl->ops.setup_lane_map         = dsi_ctrl_hw_14_setup_lane_map;
+	ctrl->ops.video_engine_en        = dsi_ctrl_hw_14_video_engine_en;
+	ctrl->ops.video_engine_setup     = dsi_ctrl_hw_14_video_engine_setup;
+	ctrl->ops.set_video_timing       = dsi_ctrl_hw_14_set_video_timing;
+	ctrl->ops.cmd_engine_setup       = dsi_ctrl_hw_14_cmd_engine_setup;
+	ctrl->ops.ctrl_en                = dsi_ctrl_hw_14_ctrl_en;
+	ctrl->ops.cmd_engine_en          = dsi_ctrl_hw_14_cmd_engine_en;
+	ctrl->ops.phy_sw_reset           = dsi_ctrl_hw_14_phy_sw_reset;
+	ctrl->ops.soft_reset             = dsi_ctrl_hw_14_soft_reset;
+	ctrl->ops.kickoff_command        = dsi_ctrl_hw_14_kickoff_command;
+	ctrl->ops.kickoff_fifo_command   = dsi_ctrl_hw_14_kickoff_fifo_command;
+	ctrl->ops.reset_cmd_fifo         = dsi_ctrl_hw_14_reset_cmd_fifo;
+	ctrl->ops.trigger_command_dma    = dsi_ctrl_hw_14_trigger_command_dma;
+	ctrl->ops.ulps_request           = dsi_ctrl_hw_14_ulps_request;
+	ctrl->ops.ulps_exit              = dsi_ctrl_hw_14_ulps_exit;
+	ctrl->ops.clear_ulps_request     = dsi_ctrl_hw_14_clear_ulps_request;
+	ctrl->ops.get_lanes_in_ulps      = dsi_ctrl_hw_14_get_lanes_in_ulps;
+	ctrl->ops.clamp_enable           = dsi_ctrl_hw_14_clamp_enable;
+	ctrl->ops.clamp_disable          = dsi_ctrl_hw_14_clamp_disable;
+	ctrl->ops.get_interrupt_status   = dsi_ctrl_hw_14_get_interrupt_status;
+	ctrl->ops.get_error_status       = dsi_ctrl_hw_14_get_error_status;
+	ctrl->ops.clear_error_status     = dsi_ctrl_hw_14_clear_error_status;
+	ctrl->ops.clear_interrupt_status =
+		dsi_ctrl_hw_14_clear_interrupt_status;
+	ctrl->ops.enable_status_interrupts =
+		dsi_ctrl_hw_14_enable_status_interrupts;
+	ctrl->ops.enable_error_interrupts =
+		dsi_ctrl_hw_14_enable_error_interrupts;
+	ctrl->ops.video_test_pattern_setup =
+		dsi_ctrl_hw_14_video_test_pattern_setup;
+	ctrl->ops.cmd_test_pattern_setup =
+		dsi_ctrl_hw_14_cmd_test_pattern_setup;
+	ctrl->ops.test_pattern_enable    = dsi_ctrl_hw_14_test_pattern_enable;
+	ctrl->ops.trigger_cmd_test_pattern =
+		dsi_ctrl_hw_14_trigger_cmd_test_pattern;
+}
+
+/**
+ * dsi_catalog_20_init() - catalog init for dsi controller v2.0
+ */
+static void dsi_catalog_20_init(struct dsi_ctrl_hw *ctrl)
+{
+	set_bit(DSI_CTRL_CPHY, ctrl->feature_map);
+}
+
+/**
+ * dsi_catalog_ctrl_setup() - return catalog info for dsi controller
+ * @ctrl:        Pointer to DSI controller hw object.
+ * @version:     DSI controller version.
+ * @index:       DSI controller instance ID.
+ *
+ * This function sets up the catalog information in the dsi_ctrl_hw object.
+ *
+ * return: error code for failure and 0 for success.
+ */
+int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl,
+			   enum dsi_ctrl_version version,
+			   u32 index)
+{
+	int rc = 0;
+
+	if (version == DSI_CTRL_VERSION_UNKNOWN ||
+	    version >= DSI_CTRL_VERSION_MAX) {
+		pr_err("Unsupported version: %d\n", version);
+		return -ENOTSUPP;
+	}
+
+	ctrl->index = index;
+	set_bit(DSI_CTRL_VIDEO_TPG, ctrl->feature_map);
+	set_bit(DSI_CTRL_CMD_TPG, ctrl->feature_map);
+	set_bit(DSI_CTRL_VARIABLE_REFRESH_RATE, ctrl->feature_map);
+	set_bit(DSI_CTRL_DYNAMIC_REFRESH, ctrl->feature_map);
+	set_bit(DSI_CTRL_DESKEW_CALIB, ctrl->feature_map);
+	set_bit(DSI_CTRL_DPHY, ctrl->feature_map);
+
+	switch (version) {
+	case DSI_CTRL_VERSION_1_4:
+		dsi_catalog_14_init(ctrl);
+		break;
+	case DSI_CTRL_VERSION_2_0:
+		dsi_catalog_20_init(ctrl);
+		break;
+	default:
+		return -ENOTSUPP;
+	}
+
+	return rc;
+}
+
+/**
+ * dsi_catalog_phy_4_0_init() - catalog init for DSI PHY v4.0
+ */
+static void dsi_catalog_phy_4_0_init(struct dsi_phy_hw *phy)
+{
+	phy->ops.regulator_enable = dsi_phy_hw_v4_0_regulator_enable;
+	phy->ops.regulator_disable = dsi_phy_hw_v4_0_regulator_disable;
+	phy->ops.enable = dsi_phy_hw_v4_0_enable;
+	phy->ops.disable = dsi_phy_hw_v4_0_disable;
+	phy->ops.calculate_timing_params =
+		dsi_phy_hw_v4_0_calculate_timing_params;
+}
+
+/**
+ * dsi_catalog_phy_setup() - return catalog info for dsi phy hardware
+ * @phy:         Pointer to DSI PHY hw object.
+ * @version:     DSI PHY version.
+ * @index:       DSI PHY instance ID.
+ *
+ * This function sets up the catalog information in the dsi_phy_hw object.
+ *
+ * return: error code for failure and 0 for success.
+ */
+int dsi_catalog_phy_setup(struct dsi_phy_hw *phy,
+			  enum dsi_phy_version version,
+			  u32 index)
+{
+	int rc = 0;
+
+	if (version == DSI_PHY_VERSION_UNKNOWN ||
+	    version >= DSI_PHY_VERSION_MAX) {
+		pr_err("Unsupported version: %d\n", version);
+		return -ENOTSUPP;
+	}
+
+	phy->index = index;
+	set_bit(DSI_PHY_DPHY, phy->feature_map);
+
+	switch (version) {
+	case DSI_PHY_VERSION_4_0:
+		dsi_catalog_phy_4_0_init(phy);
+		break;
+	case DSI_PHY_VERSION_1_0:
+	case DSI_PHY_VERSION_2_0:
+	case DSI_PHY_VERSION_3_0:
+	default:
+		return -ENOTSUPP;
+	}
+
+	return rc;
+}
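+
+/*
+ * Usage sketch (illustrative): a controller probe path is expected to
+ * derive the version (e.g. from the compatible string) and let the
+ * catalog populate the ops table before any register access. The
+ * variables below are assumptions, not part of this file.
+ *
+ *	rc = dsi_catalog_ctrl_setup(&ctrl->hw, DSI_CTRL_VERSION_1_4,
+ *				    ctrl->cell_index);
+ *	if (rc)
+ *		return rc;
+ */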
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
new file mode 100644
index 0000000..e4b33c2
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_CATALOG_H_
+#define _DSI_CATALOG_H_
+
+#include "dsi_ctrl_hw.h"
+#include "dsi_phy_hw.h"
+
+/**
+ * dsi_catalog_ctrl_setup() - return catalog info for dsi controller
+ * @ctrl:        Pointer to DSI controller hw object.
+ * @version:     DSI controller version.
+ * @index:       DSI controller instance ID.
+ *
+ * This function sets up the catalog information in the dsi_ctrl_hw object.
+ *
+ * return: error code for failure and 0 for success.
+ */
+int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl,
+			   enum dsi_ctrl_version version,
+			   u32 index);
+
+/**
+ * dsi_catalog_phy_setup() - return catalog info for dsi phy hardware
+ * @phy:         Pointer to DSI PHY hw object.
+ * @version:     DSI PHY version.
+ * @index:       DSI PHY instance ID.
+ *
+ * This function sets up the catalog information in the dsi_phy_hw object.
+ *
+ * return: error code for failure and 0 for success.
+ */
+int dsi_catalog_phy_setup(struct dsi_phy_hw *phy,
+			  enum dsi_phy_version version,
+			  u32 index);
+
+/* Definitions for 4.0 PHY hardware driver */
+void dsi_phy_hw_v4_0_regulator_enable(struct dsi_phy_hw *phy,
+				      struct dsi_phy_per_lane_cfgs *cfg);
+void dsi_phy_hw_v4_0_regulator_disable(struct dsi_phy_hw *phy);
+void dsi_phy_hw_v4_0_enable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
+void dsi_phy_hw_v4_0_disable(struct dsi_phy_hw *phy);
+int dsi_phy_hw_v4_0_calculate_timing_params(struct dsi_phy_hw *phy,
+					    struct dsi_mode_info *mode,
+					    struct dsi_host_common_cfg *cfg,
+					    struct dsi_phy_per_lane_cfgs
+					    *timing);
+
+/* Definitions for 1.4 controller hardware driver */
+void dsi_ctrl_hw_14_host_setup(struct dsi_ctrl_hw *ctrl,
+			       struct dsi_host_common_cfg *config);
+void dsi_ctrl_hw_14_video_engine_en(struct dsi_ctrl_hw *ctrl, bool on);
+void dsi_ctrl_hw_14_video_engine_setup(struct dsi_ctrl_hw *ctrl,
+				       struct dsi_host_common_cfg *common_cfg,
+				       struct dsi_video_engine_cfg *cfg);
+void dsi_ctrl_hw_14_set_video_timing(struct dsi_ctrl_hw *ctrl,
+			 struct dsi_mode_info *mode);
+
+void dsi_ctrl_hw_14_cmd_engine_setup(struct dsi_ctrl_hw *ctrl,
+				     struct dsi_host_common_cfg *common_cfg,
+				     struct dsi_cmd_engine_cfg *cfg);
+
+void dsi_ctrl_hw_14_ctrl_en(struct dsi_ctrl_hw *ctrl, bool on);
+void dsi_ctrl_hw_14_cmd_engine_en(struct dsi_ctrl_hw *ctrl, bool on);
+
+void dsi_ctrl_hw_14_phy_sw_reset(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_14_soft_reset(struct dsi_ctrl_hw *ctrl);
+
+void dsi_ctrl_hw_14_setup_lane_map(struct dsi_ctrl_hw *ctrl,
+		       struct dsi_lane_mapping *lane_map);
+void dsi_ctrl_hw_14_kickoff_command(struct dsi_ctrl_hw *ctrl,
+			struct dsi_ctrl_cmd_dma_info *cmd,
+			u32 flags);
+
+void dsi_ctrl_hw_14_kickoff_fifo_command(struct dsi_ctrl_hw *ctrl,
+			     struct dsi_ctrl_cmd_dma_fifo_info *cmd,
+			     u32 flags);
+void dsi_ctrl_hw_14_reset_cmd_fifo(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_14_trigger_command_dma(struct dsi_ctrl_hw *ctrl);
+
+void dsi_ctrl_hw_14_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes);
+void dsi_ctrl_hw_14_ulps_exit(struct dsi_ctrl_hw *ctrl, u32 lanes);
+void dsi_ctrl_hw_14_clear_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes);
+u32 dsi_ctrl_hw_14_get_lanes_in_ulps(struct dsi_ctrl_hw *ctrl);
+
+void dsi_ctrl_hw_14_clamp_enable(struct dsi_ctrl_hw *ctrl,
+				 u32 lanes,
+				 bool enable_ulps);
+
+void dsi_ctrl_hw_14_clamp_disable(struct dsi_ctrl_hw *ctrl,
+				  u32 lanes,
+				  bool disable_ulps);
+u32 dsi_ctrl_hw_14_get_interrupt_status(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_14_clear_interrupt_status(struct dsi_ctrl_hw *ctrl, u32 ints);
+void dsi_ctrl_hw_14_enable_status_interrupts(struct dsi_ctrl_hw *ctrl,
+					     u32 ints);
+
+u64 dsi_ctrl_hw_14_get_error_status(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_14_clear_error_status(struct dsi_ctrl_hw *ctrl, u64 errors);
+void dsi_ctrl_hw_14_enable_error_interrupts(struct dsi_ctrl_hw *ctrl,
+					    u64 errors);
+
+void dsi_ctrl_hw_14_video_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
+				 enum dsi_test_pattern type,
+				 u32 init_val);
+void dsi_ctrl_hw_14_cmd_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
+			       enum dsi_test_pattern  type,
+			       u32 init_val,
+			       u32 stream_id);
+void dsi_ctrl_hw_14_test_pattern_enable(struct dsi_ctrl_hw *ctrl, bool enable);
+void dsi_ctrl_hw_14_trigger_cmd_test_pattern(struct dsi_ctrl_hw *ctrl,
+				 u32 stream_id);
+#endif /* _DSI_CATALOG_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
new file mode 100644
index 0000000..b5ddfbb
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
@@ -0,0 +1,558 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_CTRL_HW_H_
+#define _DSI_CTRL_HW_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/bitmap.h>
+
+#include "dsi_defs.h"
+
+/**
+ * Modifier flag for command transmission. If this flag is set, command
+ * information is programmed to hardware and transmission is not triggered.
+ * Caller should call trigger_command_dma() to start the transmission. This
+ * flag is valid for kickoff_command() and kickoff_fifo_command() operations.
+ */
+#define DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER            0x1
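+
+/*
+ * Deferred-trigger sketch (illustrative): with the flag set, the command
+ * is only programmed; transmission starts when the caller later invokes
+ * trigger_command_dma(). "ctrl" and "cmd_info" are assumed to be a
+ * populated dsi_ctrl_hw and dsi_ctrl_cmd_dma_info.
+ *
+ *	ctrl->ops.kickoff_command(ctrl, &cmd_info,
+ *				  DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER);
+ *	ctrl->ops.trigger_command_dma(ctrl);
+ */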
+
+/**
+ * enum dsi_ctrl_version - version of the dsi host controller
+ * @DSI_CTRL_VERSION_UNKNOWN: Unknown controller version
+ * @DSI_CTRL_VERSION_1_4:     DSI host v1.4 controller
+ * @DSI_CTRL_VERSION_2_0:     DSI host v2.0 controller
+ * @DSI_CTRL_VERSION_MAX:     max version
+ */
+enum dsi_ctrl_version {
+	DSI_CTRL_VERSION_UNKNOWN,
+	DSI_CTRL_VERSION_1_4,
+	DSI_CTRL_VERSION_2_0,
+	DSI_CTRL_VERSION_MAX
+};
+
+/**
+ * enum dsi_ctrl_hw_features - features supported by dsi host controller
+ * @DSI_CTRL_VIDEO_TPG:               Test pattern support for video mode.
+ * @DSI_CTRL_CMD_TPG:                 Test pattern support for command mode.
+ * @DSI_CTRL_VARIABLE_REFRESH_RATE:   variable panel timing
+ * @DSI_CTRL_DYNAMIC_REFRESH:         variable pixel clock rate
+ * @DSI_CTRL_NULL_PACKET_INSERTION:   NULL packet insertion
+ * @DSI_CTRL_DESKEW_CALIB:            Deskew calibration support
+ * @DSI_CTRL_DPHY:                    Controller support for DPHY
+ * @DSI_CTRL_CPHY:                    Controller support for CPHY
+ * @DSI_CTRL_MAX_FEATURES:
+ */
+enum dsi_ctrl_hw_features {
+	DSI_CTRL_VIDEO_TPG,
+	DSI_CTRL_CMD_TPG,
+	DSI_CTRL_VARIABLE_REFRESH_RATE,
+	DSI_CTRL_DYNAMIC_REFRESH,
+	DSI_CTRL_NULL_PACKET_INSERTION,
+	DSI_CTRL_DESKEW_CALIB,
+	DSI_CTRL_DPHY,
+	DSI_CTRL_CPHY,
+	DSI_CTRL_MAX_FEATURES
+};
+
+/**
+ * enum dsi_test_pattern - test pattern type
+ * @DSI_TEST_PATTERN_FIXED:     Test pattern is fixed, based on init value.
+ * @DSI_TEST_PATTERN_INC:       Incremental test pattern, based on init value.
+ * @DSI_TEST_PATTERN_POLY:      Pattern generated from polynomial and init val.
+ * @DSI_TEST_PATTERN_MAX:
+ */
+enum dsi_test_pattern {
+	DSI_TEST_PATTERN_FIXED = 0,
+	DSI_TEST_PATTERN_INC,
+	DSI_TEST_PATTERN_POLY,
+	DSI_TEST_PATTERN_MAX
+};
+
+/**
+ * enum dsi_status_int_type - status interrupts generated by DSI controller
+ * @DSI_CMD_MODE_DMA_DONE:        Command mode DMA packets are sent out.
+ * @DSI_CMD_STREAM0_FRAME_DONE:   A frame of command mode stream0 is sent out.
+ * @DSI_CMD_STREAM1_FRAME_DONE:   A frame of command mode stream1 is sent out.
+ * @DSI_CMD_STREAM2_FRAME_DONE:   A frame of command mode stream2 is sent out.
+ * @DSI_VIDEO_MODE_FRAME_DONE:    A frame of video mode stream is sent out.
+ * @DSI_BTA_DONE:                 A BTA is completed.
+ * @DSI_CMD_FRAME_DONE:           A frame of selected command mode stream is
+ *                                sent out by MDP.
+ * @DSI_DYN_REFRESH_DONE:         The dynamic refresh operation has completed.
+ * @DSI_DESKEW_DONE:              The deskew calibration operation has
+ *                                completed.
+ * @DSI_DYN_BLANK_DMA_DONE:       The dynamic blanking DMA operation has
+ *                                completed.
+ */
+enum dsi_status_int_type {
+	DSI_CMD_MODE_DMA_DONE = BIT(0),
+	DSI_CMD_STREAM0_FRAME_DONE = BIT(1),
+	DSI_CMD_STREAM1_FRAME_DONE = BIT(2),
+	DSI_CMD_STREAM2_FRAME_DONE = BIT(3),
+	DSI_VIDEO_MODE_FRAME_DONE = BIT(4),
+	DSI_BTA_DONE = BIT(5),
+	DSI_CMD_FRAME_DONE = BIT(6),
+	DSI_DYN_REFRESH_DONE = BIT(7),
+	DSI_DESKEW_DONE = BIT(8),
+	DSI_DYN_BLANK_DMA_DONE = BIT(9)
+};
+
+/**
+ * enum dsi_error_int_type - error interrupts generated by DSI controller
+ * @DSI_RDBK_SINGLE_ECC_ERR:        Single bit ECC error in read packet.
+ * @DSI_RDBK_MULTI_ECC_ERR:         Multi bit ECC error in read packet.
+ * @DSI_RDBK_CRC_ERR:               CRC error in read packet.
+ * @DSI_RDBK_INCOMPLETE_PKT:        Incomplete read packet.
+ * @DSI_PERIPH_ERROR_PKT:           Error packet returned from peripheral.
+ * @DSI_LP_RX_TIMEOUT:              Low power reverse transmission timeout.
+ * @DSI_HS_TX_TIMEOUT:              High speed forward transmission timeout.
+ * @DSI_BTA_TIMEOUT:                BTA timeout.
+ * @DSI_PLL_UNLOCK:                 PLL has unlocked.
+ * @DSI_DLN0_ESC_ENTRY_ERR:         Incorrect LP Rx escape entry.
+ * @DSI_DLN0_ESC_SYNC_ERR:          LP Rx data is not byte aligned.
+ * @DSI_DLN0_LP_CONTROL_ERR:        Incorrect LP Rx state sequence.
+ * @DSI_PENDING_HS_TX_TIMEOUT:      Pending High-speed transfer timeout.
+ * @DSI_INTERLEAVE_OP_CONTENTION:   Interleave operation contention.
+ * @DSI_CMD_DMA_FIFO_UNDERFLOW:     Command mode DMA FIFO underflow.
+ * @DSI_CMD_MDP_FIFO_UNDERFLOW:     Command MDP FIFO underflow (failed to
+ *                                  receive one complete line from MDP).
+ * @DSI_DLN0_HS_FIFO_OVERFLOW:      High speed FIFO for data lane 0 overflows.
+ * @DSI_DLN1_HS_FIFO_OVERFLOW:      High speed FIFO for data lane 1 overflows.
+ * @DSI_DLN2_HS_FIFO_OVERFLOW:      High speed FIFO for data lane 2 overflows.
+ * @DSI_DLN3_HS_FIFO_OVERFLOW:      High speed FIFO for data lane 3 overflows.
+ * @DSI_DLN0_HS_FIFO_UNDERFLOW:     High speed FIFO for data lane 0 underflows.
+ * @DSI_DLN1_HS_FIFO_UNDERFLOW:     High speed FIFO for data lane 1 underflows.
+ * @DSI_DLN2_HS_FIFO_UNDERFLOW:     High speed FIFO for data lane 2 underflows.
+ * @DSI_DLN3_HS_FIFO_UNDERFLOW:     High speed FIFO for data lane 3 underflows.
+ * @DSI_DLN0_LP0_CONTENTION:        PHY level contention while lane 0 is low.
+ * @DSI_DLN1_LP0_CONTENTION:        PHY level contention while lane 1 is low.
+ * @DSI_DLN2_LP0_CONTENTION:        PHY level contention while lane 2 is low.
+ * @DSI_DLN3_LP0_CONTENTION:        PHY level contention while lane 3 is low.
+ * @DSI_DLN0_LP1_CONTENTION:        PHY level contention while lane 0 is high.
+ * @DSI_DLN1_LP1_CONTENTION:        PHY level contention while lane 1 is high.
+ * @DSI_DLN2_LP1_CONTENTION:        PHY level contention while lane 2 is high.
+ * @DSI_DLN3_LP1_CONTENTION:        PHY level contention while lane 3 is high.
+ */
+enum dsi_error_int_type {
+	DSI_RDBK_SINGLE_ECC_ERR = BIT(0),
+	DSI_RDBK_MULTI_ECC_ERR = BIT(1),
+	DSI_RDBK_CRC_ERR = BIT(2),
+	DSI_RDBK_INCOMPLETE_PKT = BIT(3),
+	DSI_PERIPH_ERROR_PKT = BIT(4),
+	DSI_LP_RX_TIMEOUT = BIT(5),
+	DSI_HS_TX_TIMEOUT = BIT(6),
+	DSI_BTA_TIMEOUT = BIT(7),
+	DSI_PLL_UNLOCK = BIT(8),
+	DSI_DLN0_ESC_ENTRY_ERR = BIT(9),
+	DSI_DLN0_ESC_SYNC_ERR = BIT(10),
+	DSI_DLN0_LP_CONTROL_ERR = BIT(11),
+	DSI_PENDING_HS_TX_TIMEOUT = BIT(12),
+	DSI_INTERLEAVE_OP_CONTENTION = BIT(13),
+	DSI_CMD_DMA_FIFO_UNDERFLOW = BIT(14),
+	DSI_CMD_MDP_FIFO_UNDERFLOW = BIT(15),
+	DSI_DLN0_HS_FIFO_OVERFLOW = BIT(16),
+	DSI_DLN1_HS_FIFO_OVERFLOW = BIT(17),
+	DSI_DLN2_HS_FIFO_OVERFLOW = BIT(18),
+	DSI_DLN3_HS_FIFO_OVERFLOW = BIT(19),
+	DSI_DLN0_HS_FIFO_UNDERFLOW = BIT(20),
+	DSI_DLN1_HS_FIFO_UNDERFLOW = BIT(21),
+	DSI_DLN2_HS_FIFO_UNDERFLOW = BIT(22),
+	DSI_DLN3_HS_FIFO_UNDERFLOW = BIT(23),
+	DSI_DLN0_LP0_CONTENTION = BIT(24),
+	DSI_DLN1_LP0_CONTENTION = BIT(25),
+	DSI_DLN2_LP0_CONTENTION = BIT(26),
+	DSI_DLN3_LP0_CONTENTION = BIT(27),
+	DSI_DLN0_LP1_CONTENTION = BIT(28),
+	DSI_DLN1_LP1_CONTENTION = BIT(29),
+	DSI_DLN2_LP1_CONTENTION = BIT(30),
+	DSI_DLN3_LP1_CONTENTION = BIT(31),
+};
+
+/**
+ * struct dsi_ctrl_cmd_dma_info - command buffer information
+ * @offset:        IOMMU VA for command buffer address.
+ * @length:        Length of the command buffer.
+ * @en_broadcast:  Enable broadcast mode if set to true.
+ * @is_master:     Is master in broadcast mode.
+ * @use_lpm:       Use low power mode for command transmission.
+ */
+struct dsi_ctrl_cmd_dma_info {
+	u32 offset;
+	u32 length;
+	bool en_broadcast;
+	bool is_master;
+	bool use_lpm;
+};
+
+/**
+ * struct dsi_ctrl_cmd_dma_fifo_info - command payload to be sent using FIFO
+ * @command:        VA for command buffer.
+ * @size:           Size of the command buffer.
+ * @en_broadcast:   Enable broadcast mode if set to true.
+ * @is_master:      Is master in broadcast mode.
+ * @use_lpm:        Use low power mode for command transmission.
+ */
+struct dsi_ctrl_cmd_dma_fifo_info {
+	u32 *command;
+	u32 size;
+	bool en_broadcast;
+	bool is_master;
+	bool use_lpm;
+};
+
+struct dsi_ctrl_hw;
+
+/**
+ * struct dsi_ctrl_hw_ops - operations supported by dsi host hardware
+ */
+struct dsi_ctrl_hw_ops {
+
+	/**
+	 * host_setup() - Setup DSI host configuration
+	 * @ctrl:          Pointer to controller host hardware.
+	 * @config:        Configuration for DSI host controller
+	 */
+	void (*host_setup)(struct dsi_ctrl_hw *ctrl,
+			   struct dsi_host_common_cfg *config);
+
+	/**
+	 * video_engine_en() - enable DSI video engine
+	 * @ctrl:          Pointer to controller host hardware.
+	 * @on:            Enable/disable video engine.
+	 */
+	void (*video_engine_en)(struct dsi_ctrl_hw *ctrl, bool on);
+
+	/**
+	 * video_engine_setup() - Setup dsi host controller for video mode
+	 * @ctrl:          Pointer to controller host hardware.
+	 * @common_cfg:    Common configuration parameters.
+	 * @cfg:           Video mode configuration.
+	 *
+	 * Set up DSI video engine with a specific configuration. Controller and
+	 * video engine are not enabled as part of this function.
+	 */
+	void (*video_engine_setup)(struct dsi_ctrl_hw *ctrl,
+				   struct dsi_host_common_cfg *common_cfg,
+				   struct dsi_video_engine_cfg *cfg);
+
+	/**
+	 * set_video_timing() - set up the timing for video frame
+	 * @ctrl:          Pointer to controller host hardware.
+	 * @mode:          Video mode information.
+	 *
+	 * Set up the video timing parameters for the DSI video mode operation.
+	 */
+	void (*set_video_timing)(struct dsi_ctrl_hw *ctrl,
+				 struct dsi_mode_info *mode);
+
+	/**
+	 * cmd_engine_setup() - setup dsi host controller for command mode
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @common_cfg:    Common configuration parameters.
+	 * @cfg:           Command mode configuration.
+	 *
+	 * Setup DSI CMD engine with a specific configuration. Controller and
+	 * command engine are not enabled as part of this function.
+	 */
+	void (*cmd_engine_setup)(struct dsi_ctrl_hw *ctrl,
+				 struct dsi_host_common_cfg *common_cfg,
+				 struct dsi_cmd_engine_cfg *cfg);
+
+	/**
+	 * ctrl_en() - enable DSI controller engine
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @on:            turn on/off the DSI controller engine.
+	 */
+	void (*ctrl_en)(struct dsi_ctrl_hw *ctrl, bool on);
+
+	/**
+	 * cmd_engine_en() - enable DSI controller command engine
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @on:            Turn on/off the DSI command engine.
+	 */
+	void (*cmd_engine_en)(struct dsi_ctrl_hw *ctrl, bool on);
+
+	/**
+	 * phy_sw_reset() - perform a soft reset on the PHY.
+	 * @ctrl:        Pointer to the controller host hardware.
+	 */
+	void (*phy_sw_reset)(struct dsi_ctrl_hw *ctrl);
+
+	/**
+	 * soft_reset() - perform a soft reset on DSI controller
+	 * @ctrl:          Pointer to the controller host hardware.
+	 *
+	 * The video, command and controller engines will be disabled before
+	 * the reset is triggered. These engines will not be enabled after the
+	 * reset is complete. Caller must re-enable the engines.
+	 *
+	 * If the reset is done while the MDP timing engine is turned on, the
+	 * video engine should be re-enabled only during the vertical blanking
+	 * time.
+	 */
+	void (*soft_reset)(struct dsi_ctrl_hw *ctrl);
+
+	/**
+	 * setup_lane_map() - setup mapping between logical and physical lanes
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @lane_map:      Structure defining the mapping between DSI logical
+	 *                 lanes and physical lanes.
+	 */
+	void (*setup_lane_map)(struct dsi_ctrl_hw *ctrl,
+			       struct dsi_lane_mapping *lane_map);
+
+	/**
+	 * kickoff_command() - transmits commands stored in memory
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @cmd:           Command information.
+	 * @flags:         Modifiers for command transmission.
+	 *
+	 * The controller hardware is programmed with address and size of the
+	 * command buffer. The transmission is kicked off if
+	 * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
+	 * set, caller should make a separate call to trigger_command_dma() to
+	 * transmit the command.
+	 */
+	void (*kickoff_command)(struct dsi_ctrl_hw *ctrl,
+				struct dsi_ctrl_cmd_dma_info *cmd,
+				u32 flags);
+
+	/**
+	 * kickoff_fifo_command() - transmits a command using FIFO in dsi
+	 *                          hardware.
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @cmd:           Command information.
+	 * @flags:         Modifiers for command transmission.
+	 *
+	 * The controller hardware FIFO is programmed with command header and
+	 * payload. The transmission is kicked off if
+	 * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
+	 * set, caller should make a separate call to trigger_command_dma() to
+	 * transmit the command.
+	 */
+	void (*kickoff_fifo_command)(struct dsi_ctrl_hw *ctrl,
+				     struct dsi_ctrl_cmd_dma_fifo_info *cmd,
+				     u32 flags);
+
+	/**
+	 * reset_cmd_fifo() - reset the TPG DMA FIFO used by
+	 *                    kickoff_fifo_command()
+	 * @ctrl:          Pointer to the controller host hardware.
+	 */
+	void (*reset_cmd_fifo)(struct dsi_ctrl_hw *ctrl);
+
+	/**
+	 * trigger_command_dma() - trigger transmission of command buffer.
+	 * @ctrl:          Pointer to the controller host hardware.
+	 *
+	 * This trigger can be only used if there was a prior call to
+	 * kickoff_command() or kickoff_fifo_command() with
+	 * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag.
+	 */
+	void (*trigger_command_dma)(struct dsi_ctrl_hw *ctrl);
+
+	/**
+	 * get_cmd_read_data() - get data read from the peripheral
+	 * @ctrl:           Pointer to the controller host hardware.
+	 * @rd_buf:         Buffer where data will be read into.
+	 * @total_read_len: Number of bytes to read.
+	 */
+	u32 (*get_cmd_read_data)(struct dsi_ctrl_hw *ctrl,
+				 u8 *rd_buf,
+				 u32 total_read_len);
+
+	/**
+	 * ulps_request() - request ulps entry for specified lanes
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @lanes:         ORed list of lanes (enum dsi_data_lanes) which need
+	 *                 to enter ULPS.
+	 *
+	 * Caller should check if lanes are in ULPS mode by calling
+	 * get_lanes_in_ulps() operation.
+	 */
+	void (*ulps_request)(struct dsi_ctrl_hw *ctrl, u32 lanes);
+
+	/**
+	 * ulps_exit() - exit ULPS on specified lanes
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @lanes:         ORed list of lanes (enum dsi_data_lanes) which need
+	 *                 to exit ULPS.
+	 *
+	 * Caller should check if lanes are in active mode by calling
+	 * get_lanes_in_ulps() operation.
+	 */
+	void (*ulps_exit)(struct dsi_ctrl_hw *ctrl, u32 lanes);
+
+	/**
+	 * clear_ulps_request() - clear ulps request once all lanes are active
+	 * @ctrl:          Pointer to controller host hardware.
+	 * @lanes:         ORed list of lanes (enum dsi_data_lanes).
+	 *
+	 * ULPS request should be cleared after the lanes have exited ULPS.
+	 */
+	void (*clear_ulps_request)(struct dsi_ctrl_hw *ctrl, u32 lanes);
+
+	/**
+	 * get_lanes_in_ulps() - returns the list of lanes in ULPS mode
+	 * @ctrl:          Pointer to the controller host hardware.
+	 *
+	 * Returns an ORed list of lanes (enum dsi_data_lanes) that are in ULPS
+	 * state. If 0 is returned, all the lanes are active.
+	 *
+	 * Return: List of lanes in ULPS state.
+	 */
+	u32 (*get_lanes_in_ulps)(struct dsi_ctrl_hw *ctrl);
+
+	/**
+	 * clamp_enable() - enable DSI clamps to keep PHY driving a stable link
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @lanes:         ORed list of lanes which need to be clamped.
+	 * @enable_ulps:   Also request ULPS entry for the clamped lanes
+	 *                 (asserts the ULPS request bits along with the
+	 *                 clamp bits).
+	 */
+	void (*clamp_enable)(struct dsi_ctrl_hw *ctrl,
+			     u32 lanes,
+			     bool enable_ulps);
+
+	/**
+	 * clamp_disable() - disable DSI clamps
+	 * @ctrl:         Pointer to the controller host hardware.
+	 * @lanes:        ORed list of lanes which need to have clamps released.
+	 * @disable_ulps: Also clear the ULPS request bits that were
+	 *                asserted when the clamps were enabled.
+	 */
+	void (*clamp_disable)(struct dsi_ctrl_hw *ctrl,
+			      u32 lanes,
+			      bool disable_ulps);
+
+	/**
+	 * get_interrupt_status() - returns the interrupt status
+	 * @ctrl:          Pointer to the controller host hardware.
+	 *
+	 * Returns the ORed list of interrupts (enum dsi_status_int_type) that
+	 * are active. This list does not include any error interrupts. Caller
+	 * should call get_error_status for error interrupts.
+	 *
+	 * Return: List of active interrupts.
+	 */
+	u32 (*get_interrupt_status)(struct dsi_ctrl_hw *ctrl);
+
+	/**
+	 * clear_interrupt_status() - clears the specified interrupts
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @ints:          List of interrupts to be cleared.
+	 */
+	void (*clear_interrupt_status)(struct dsi_ctrl_hw *ctrl, u32 ints);
+
+	/**
+	 * enable_status_interrupts() - enable the specified interrupts
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @ints:          List of interrupts to be enabled.
+	 *
+	 * Enables the specified interrupts. This list will override the
+	 * previous interrupts enabled through this function. Caller has to
+	 * maintain the state of the interrupts enabled. To disable all
+	 * interrupts, set ints to 0.
+	 */
+	void (*enable_status_interrupts)(struct dsi_ctrl_hw *ctrl, u32 ints);
+
+	/**
+	 * get_error_status() - returns the error status
+	 * @ctrl:          Pointer to the controller host hardware.
+	 *
+	 * Returns the ORed list of errors (enum dsi_error_int_type) that are
+	 * active. This list does not include any status interrupts. Caller
+	 * should call get_interrupt_status for status interrupts.
+	 *
+	 * Return: List of active error interrupts.
+	 */
+	u64 (*get_error_status)(struct dsi_ctrl_hw *ctrl);
+
+	/**
+	 * clear_error_status() - clears the specified errors
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @errors:        List of errors to be cleared.
+	 */
+	void (*clear_error_status)(struct dsi_ctrl_hw *ctrl, u64 errors);
+
+	/**
+	 * enable_error_interrupts() - enable the specified interrupts
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @errors:        List of errors to be enabled.
+	 *
+	 * Enables the specified interrupts. This list will override the
+	 * previous interrupts enabled through this function. Caller has to
+	 * maintain the state of the interrupts enabled. To disable all
+	 * interrupts, set errors to 0.
+	 */
+	void (*enable_error_interrupts)(struct dsi_ctrl_hw *ctrl, u64 errors);
+
+	/**
+	 * video_test_pattern_setup() - setup test pattern engine for video mode
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @type:          Type of test pattern.
+	 * @init_val:      Initial value to use for generating test pattern.
+	 */
+	void (*video_test_pattern_setup)(struct dsi_ctrl_hw *ctrl,
+					 enum dsi_test_pattern type,
+					 u32 init_val);
+
+	/**
+	 * cmd_test_pattern_setup() - setup test pattern engine for cmd mode
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @type:          Type of test pattern.
+	 * @init_val:      Initial value to use for generating test pattern.
+	 * @stream_id:     Stream Id on which packets are generated.
+	 */
+	void (*cmd_test_pattern_setup)(struct dsi_ctrl_hw *ctrl,
+				       enum dsi_test_pattern  type,
+				       u32 init_val,
+				       u32 stream_id);
+
+	/**
+	 * test_pattern_enable() - enable test pattern engine
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @enable:        Enable/Disable test pattern engine.
+	 */
+	void (*test_pattern_enable)(struct dsi_ctrl_hw *ctrl, bool enable);
+
+	/**
+	 * trigger_cmd_test_pattern() - trigger a command mode frame update with
+	 *                              test pattern
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @stream_id:     Stream on which frame update is sent.
+	 */
+	void (*trigger_cmd_test_pattern)(struct dsi_ctrl_hw *ctrl,
+					 u32 stream_id);
+};
+
+/**
+ * struct dsi_ctrl_hw - DSI controller hardware object specific to an instance
+ * @base:                 VA for the DSI controller base address.
+ * @length:               Length of the DSI controller register map.
+ * @mmss_misc_base:       VA for the mmss_misc register base.
+ * @mmss_misc_length:     Length of the mmss_misc register map.
+ * @index:                Instance ID of the controller.
+ * @feature_map:          Features supported by the DSI controller.
+ * @ops:                  Function pointers to the operations supported by the
+ *                        controller.
+ * @supported_interrupts: Status interrupts supported by the controller.
+ * @supported_errors:     Error interrupts supported by the controller.
+ */
+struct dsi_ctrl_hw {
+	void __iomem *base;
+	u32 length;
+	void __iomem *mmss_misc_base;
+	u32 mmss_misc_length;
+	u32 index;
+
+	/* features */
+	DECLARE_BITMAP(feature_map, DSI_CTRL_MAX_FEATURES);
+	struct dsi_ctrl_hw_ops ops;
+
+	/* capabilities */
+	u32 supported_interrupts;
+	u64 supported_errors;
+};
+
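+/*
+ * Illustrative capability check (sketch): callers are expected to test
+ * feature_map before invoking an optional operation, e.g.
+ *
+ *	if (test_bit(DSI_CTRL_VIDEO_TPG, ctrl->feature_map))
+ *		ctrl->ops.video_test_pattern_setup(ctrl,
+ *					DSI_TEST_PATTERN_INC, 0xFF);
+ */
+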
+#endif /* _DSI_CTRL_HW_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c
new file mode 100644
index 0000000..8326024
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c
@@ -0,0 +1,1321 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "dsi-hw:" fmt
+#include <linux/delay.h>
+
+#include "dsi_ctrl_hw.h"
+#include "dsi_ctrl_reg_1_4.h"
+#include "dsi_hw.h"
+
+#define MMSS_MISC_CLAMP_REG_OFF           0x0014
+
+/* Unsupported formats default to RGB888 */
+static const u8 cmd_mode_format_map[DSI_PIXEL_FORMAT_MAX] = {
+	0x6, 0x7, 0x8, 0x8, 0x0, 0x3, 0x4 };
+static const u8 video_mode_format_map[DSI_PIXEL_FORMAT_MAX] = {
+	0x0, 0x1, 0x2, 0x3, 0x3, 0x3, 0x3 };
+
+
+/**
+ * dsi_setup_trigger_controls() - setup dsi trigger configurations
+ * @ctrl:             Pointer to the controller host hardware.
+ * @cfg:              DSI host configuration that is common to both video and
+ *                    command modes.
+ */
+static void dsi_setup_trigger_controls(struct dsi_ctrl_hw *ctrl,
+				       struct dsi_host_common_cfg *cfg)
+{
+	u32 reg = 0;
+	/* Map enum dsi_trigger_type values to TRIG_CTRL field encodings */
+	const u8 trigger_map[DSI_TRIGGER_MAX] = {
+		0x0, 0x2, 0x1, 0x4, 0x5, 0x6 };
+
+	reg |= (cfg->te_mode == DSI_TE_ON_EXT_PIN) ? BIT(31) : 0;
+	reg |= (trigger_map[cfg->dma_cmd_trigger] & 0x7);
+	reg |= (trigger_map[cfg->mdp_cmd_trigger] & 0x7) << 4;
+	DSI_W32(ctrl, DSI_TRIG_CTRL, reg);
+}
+
+/**
+ * dsi_ctrl_hw_14_host_setup() - setup dsi host configuration
+ * @ctrl:             Pointer to the controller host hardware.
+ * @cfg:              DSI host configuration that is common to both video and
+ *                    command modes.
+ */
+void dsi_ctrl_hw_14_host_setup(struct dsi_ctrl_hw *ctrl,
+			       struct dsi_host_common_cfg *cfg)
+{
+	u32 reg_value = 0;
+
+	dsi_setup_trigger_controls(ctrl, cfg);
+
+	/* Set up clock lane timing: t_clk_post in [13:8], t_clk_pre in [5:0] */
+	reg_value = ((cfg->t_clk_post & 0x3F) << 8);
+	reg_value |= (cfg->t_clk_pre & 0x3F);
+	DSI_W32(ctrl, DSI_CLKOUT_TIMING_CTRL, reg_value);
+
+	/* EOT packet control */
+	reg_value = cfg->append_tx_eot ? 1 : 0;
+	reg_value |= (cfg->ignore_rx_eot ? (1 << 4) : 0);
+	DSI_W32(ctrl, DSI_EOT_PACKET_CTRL, reg_value);
+
+	/* Turn on DSI clocks (force enable PCLK, BYTECLK, AHBM_HCLK) */
+	DSI_W32(ctrl, DSI_CLK_CTRL, 0x23F);
+
+	/* Setup DSI control register */
+	reg_value = 0;
+	reg_value |= (cfg->en_crc_check ? BIT(24) : 0);
+	reg_value |= (cfg->en_ecc_check ? BIT(20) : 0);
+	reg_value |= BIT(8); /* Clock lane */
+	reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_3) ? BIT(7) : 0);
+	reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_2) ? BIT(6) : 0);
+	reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_1) ? BIT(5) : 0);
+	reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_0) ? BIT(4) : 0);
+
+	DSI_W32(ctrl, DSI_CTRL, reg_value);
+
+	/* Enable Timing double buffering */
+	DSI_W32(ctrl, DSI_DSI_TIMING_DB_MODE, 0x1);
+
+	pr_debug("[DSI_%d]Host configuration complete\n", ctrl->index);
+}
+
+/**
+ * phy_sw_reset() - perform a soft reset on the PHY.
+ * @ctrl:        Pointer to the controller host hardware.
+ */
+void dsi_ctrl_hw_14_phy_sw_reset(struct dsi_ctrl_hw *ctrl)
+{
+	DSI_W32(ctrl, DSI_PHY_SW_RESET, 0x1);
+	udelay(1000);
+	DSI_W32(ctrl, DSI_PHY_SW_RESET, 0x0);
+	udelay(100);
+
+	pr_debug("[DSI_%d] phy sw reset done\n", ctrl->index);
+}
+
+/**
+ * soft_reset() - perform a soft reset on DSI controller
+ * @ctrl:          Pointer to the controller host hardware.
+ *
+ * The video, command and controller engines will be disabled before the
+ * reset is triggered. These engines will not be enabled after the reset
+ * is complete. Caller must re-enable the engines.
+ *
+ * If the reset is done while the MDP timing engine is turned on, the
+ * video engine should be re-enabled only during the vertical blanking time.
+ */
+void dsi_ctrl_hw_14_soft_reset(struct dsi_ctrl_hw *ctrl)
+{
+	u32 reg = 0;
+	u32 reg_ctrl = 0;
+
+	/* Clear DSI_EN, VIDEO_MODE_EN, CMD_MODE_EN */
+	reg_ctrl = DSI_R32(ctrl, DSI_CTRL);
+	DSI_W32(ctrl, DSI_CTRL, reg_ctrl & ~0x7);
+
+	/* Force enable PCLK, BYTECLK, AHBM_HCLK */
+	reg = DSI_R32(ctrl, DSI_CLK_CTRL);
+	reg |= 0x23F;
+	DSI_W32(ctrl, DSI_CLK_CTRL, reg);
+
+	/* Trigger soft reset */
+	DSI_W32(ctrl, DSI_SOFT_RESET, 0x1);
+	udelay(1);
+	DSI_W32(ctrl, DSI_SOFT_RESET, 0x0);
+
+	/* Disable force clock on */
+	reg &= ~(BIT(20) | BIT(11));
+	DSI_W32(ctrl, DSI_CLK_CTRL, reg);
+
+	/* Re-enable DSI controller */
+	DSI_W32(ctrl, DSI_CTRL, reg_ctrl);
+	pr_debug("[DSI_%d] ctrl soft reset done\n", ctrl->index);
+}
+
+/**
+ * set_video_timing() - set up the timing for video frame
+ * @ctrl:          Pointer to controller host hardware.
+ * @mode:          Video mode information.
+ *
+ * Set up the video timing parameters for the DSI video mode operation.
+ */
+void dsi_ctrl_hw_14_set_video_timing(struct dsi_ctrl_hw *ctrl,
+				     struct dsi_mode_info *mode)
+{
+	u32 reg = 0;
+	u32 hs_start = 0;
+	u32 hs_end, active_h_start, active_h_end, h_total;
+	u32 vs_start = 0, vs_end = 0;
+	u32 vpos_start = 0, vpos_end, active_v_start, active_v_end, v_total;
+
+	hs_end = mode->h_sync_width;
+	active_h_start = mode->h_sync_width + mode->h_back_porch;
+	active_h_end = active_h_start + mode->h_active;
+	h_total = (mode->h_sync_width + mode->h_back_porch + mode->h_active +
+		   mode->h_front_porch) - 1;
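+	/*
+	 * Example with illustrative numbers: h_sync_width=16,
+	 * h_back_porch=32, h_active=1080 and h_front_porch=40 give an
+	 * active window of [48, 1128) and h_total = 1167 (1168 - 1).
+	 */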
+
+	vpos_end = mode->v_sync_width;
+	active_v_start = mode->v_sync_width + mode->v_back_porch;
+	active_v_end = active_v_start + mode->v_active;
+	v_total = (mode->v_sync_width + mode->v_back_porch + mode->v_active +
+		   mode->v_front_porch) - 1;
+
+	reg = ((active_h_end & 0xFFFF) << 16) | (active_h_start & 0xFFFF);
+	DSI_W32(ctrl, DSI_VIDEO_MODE_ACTIVE_H, reg);
+
+	reg = ((active_v_end & 0xFFFF) << 16) | (active_v_start & 0xFFFF);
+	DSI_W32(ctrl, DSI_VIDEO_MODE_ACTIVE_V, reg);
+
+	reg = ((v_total & 0xFFFF) << 16) | (h_total & 0xFFFF);
+	DSI_W32(ctrl, DSI_VIDEO_MODE_TOTAL, reg);
+
+	reg = ((hs_end & 0xFFFF) << 16) | (hs_start & 0xFFFF);
+	DSI_W32(ctrl, DSI_VIDEO_MODE_HSYNC, reg);
+
+	reg = ((vs_end & 0xFFFF) << 16) | (vs_start & 0xFFFF);
+	DSI_W32(ctrl, DSI_VIDEO_MODE_VSYNC, reg);
+
+	reg = ((vpos_end & 0xFFFF) << 16) | (vpos_start & 0xFFFF);
+	DSI_W32(ctrl, DSI_VIDEO_MODE_VSYNC_VPOS, reg);
+
+	/* TODO: HS TIMER value? */
+	DSI_W32(ctrl, DSI_HS_TIMER_CTRL, 0x3FD08);
+	DSI_W32(ctrl, DSI_MISR_VIDEO_CTRL, 0x10100);
+	DSI_W32(ctrl, DSI_DSI_TIMING_FLUSH, 0x1);
+	pr_debug("[DSI_%d] ctrl video parameters updated\n", ctrl->index);
+}
+
+/**
+ * video_engine_setup() - Setup dsi host controller for video mode
+ * @ctrl:          Pointer to controller host hardware.
+ * @common_cfg:    Common configuration parameters.
+ * @cfg:           Video mode configuration.
+ *
+ * Set up DSI video engine with a specific configuration. Controller and
+ * video engine are not enabled as part of this function.
+ */
+void dsi_ctrl_hw_14_video_engine_setup(struct dsi_ctrl_hw *ctrl,
+				       struct dsi_host_common_cfg *common_cfg,
+				       struct dsi_video_engine_cfg *cfg)
+{
+	u32 reg = 0;
+
+	reg |= (cfg->last_line_interleave_en ? BIT(31) : 0);
+	reg |= (cfg->pulse_mode_hsa_he ? BIT(28) : 0);
+	reg |= (cfg->hfp_lp11_en ? BIT(24) : 0);
+	reg |= (cfg->hbp_lp11_en ? BIT(20) : 0);
+	reg |= (cfg->hsa_lp11_en ? BIT(16) : 0);
+	reg |= (cfg->eof_bllp_lp11_en ? BIT(15) : 0);
+	reg |= (cfg->bllp_lp11_en ? BIT(12) : 0);
+	reg |= (cfg->traffic_mode & 0x3) << 8;
+	reg |= (cfg->vc_id & 0x3);
+	reg |= (video_mode_format_map[common_cfg->dst_format] & 0x3) << 4;
+	DSI_W32(ctrl, DSI_VIDEO_MODE_CTRL, reg);
+
+	reg = (common_cfg->swap_mode & 0x7) << 12;
+	reg |= (common_cfg->bit_swap_red ? BIT(0) : 0);
+	reg |= (common_cfg->bit_swap_green ? BIT(4) : 0);
+	reg |= (common_cfg->bit_swap_blue ? BIT(8) : 0);
+	DSI_W32(ctrl, DSI_VIDEO_MODE_DATA_CTRL, reg);
+
+	pr_debug("[DSI_%d] Video engine setup done\n", ctrl->index);
+}
+
+/**
+ * cmd_engine_setup() - setup dsi host controller for command mode
+ * @ctrl:          Pointer to the controller host hardware.
+ * @common_cfg:    Common configuration parameters.
+ * @cfg:           Command mode configuration.
+ *
+ * Setup DSI CMD engine with a specific configuration. Controller and
+ * command engine are not enabled as part of this function.
+ */
+void dsi_ctrl_hw_14_cmd_engine_setup(struct dsi_ctrl_hw *ctrl,
+				     struct dsi_host_common_cfg *common_cfg,
+				     struct dsi_cmd_engine_cfg *cfg)
+{
+	u32 reg = 0;
+
+	reg = (cfg->max_cmd_packets_interleave & 0xF) << 20;
+	reg |= (common_cfg->bit_swap_red ? BIT(4) : 0);
+	reg |= (common_cfg->bit_swap_green ? BIT(8) : 0);
+	reg |= (common_cfg->bit_swap_blue ? BIT(12) : 0);
+	reg |= cmd_mode_format_map[common_cfg->dst_format];
+	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_CTRL, reg);
+
+	reg = cfg->wr_mem_start & 0xFF;
+	reg |= (cfg->wr_mem_continue & 0xFF) << 8;
+	reg |= (cfg->insert_dcs_command ? BIT(16) : 0);
+	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL, reg);
+
+	pr_debug("[DSI_%d] Cmd engine setup done\n", ctrl->index);
+}
+
+/**
+ * video_engine_en() - enable DSI video engine
+ * @ctrl:          Pointer to controller host hardware.
+ * @on:            Enable/disable video engine.
+ */
+void dsi_ctrl_hw_14_video_engine_en(struct dsi_ctrl_hw *ctrl, bool on)
+{
+	u32 reg = 0;
+
+	/* Set/Clear VIDEO_MODE_EN bit */
+	reg = DSI_R32(ctrl, DSI_CTRL);
+	if (on)
+		reg |= BIT(1);
+	else
+		reg &= ~BIT(1);
+
+	DSI_W32(ctrl, DSI_CTRL, reg);
+
+	pr_debug("[DSI_%d] Video engine = %d\n", ctrl->index, on);
+}
+
+/**
+ * ctrl_en() - enable DSI controller engine
+ * @ctrl:          Pointer to the controller host hardware.
+ * @on:            turn on/off the DSI controller engine.
+ */
+void dsi_ctrl_hw_14_ctrl_en(struct dsi_ctrl_hw *ctrl, bool on)
+{
+	u32 reg = 0;
+
+	/* Set/Clear DSI_EN bit */
+	reg = DSI_R32(ctrl, DSI_CTRL);
+	if (on)
+		reg |= BIT(0);
+	else
+		reg &= ~BIT(0);
+
+	DSI_W32(ctrl, DSI_CTRL, reg);
+
+	pr_debug("[DSI_%d] Controller engine = %d\n", ctrl->index, on);
+}
+
+/**
+ * cmd_engine_en() - enable DSI controller command engine
+ * @ctrl:          Pointer to the controller host hardware.
+ * @on:            Turn on/off the DSI command engine.
+ */
+void dsi_ctrl_hw_14_cmd_engine_en(struct dsi_ctrl_hw *ctrl, bool on)
+{
+	u32 reg = 0;
+
+	/* Set/Clear CMD_MODE_EN bit */
+	reg = DSI_R32(ctrl, DSI_CTRL);
+	if (on)
+		reg |= BIT(2);
+	else
+		reg &= ~BIT(2);
+
+	DSI_W32(ctrl, DSI_CTRL, reg);
+
+	pr_debug("[DSI_%d] command engine = %d\n", ctrl->index, on);
+}
+
+/**
+ * setup_lane_map() - setup mapping between logical and physical lanes
+ * @ctrl:          Pointer to the controller host hardware.
+ * @lane_map:      Structure defining the mapping between DSI logical
+ *                 lanes and physical lanes.
+ */
+void dsi_ctrl_hw_14_setup_lane_map(struct dsi_ctrl_hw *ctrl,
+			       struct dsi_lane_mapping *lane_map)
+{
+	u32 reg_value = 0;
+	u32 lane_number = ((lane_map->physical_lane0 * 1000) +
+			   (lane_map->physical_lane1 * 100) +
+			   (lane_map->physical_lane2 * 10) +
+			   (lane_map->physical_lane3));
+
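+	/*
+	 * The four per-lane assignments are folded into one decimal
+	 * number; e.g. a <3,0,1,2> physical mapping yields
+	 * lane_number == 3012, which selects swap configuration 1 below.
+	 */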
+	if (lane_number == 123)
+		reg_value = 0;
+	else if (lane_number == 3012)
+		reg_value = 1;
+	else if (lane_number == 2301)
+		reg_value = 2;
+	else if (lane_number == 1230)
+		reg_value = 3;
+	else if (lane_number == 321)
+		reg_value = 4;
+	else if (lane_number == 1032)
+		reg_value = 5;
+	else if (lane_number == 2103)
+		reg_value = 6;
+	else if (lane_number == 3210)
+		reg_value = 7;
+
+	DSI_W32(ctrl, DSI_LANE_SWAP_CTRL, reg_value);
+
+	pr_debug("[DSI_%d] Lane swap setup complete\n", ctrl->index);
+}
+
+/**
+ * kickoff_command() - transmits commands stored in memory
+ * @ctrl:          Pointer to the controller host hardware.
+ * @cmd:           Command information.
+ * @flags:         Modifiers for command transmission.
+ *
+ * The controller hardware is programmed with address and size of the
+ * command buffer. The transmission is kicked off if
+ * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
+ * set, caller should make a separate call to trigger_command_dma() to
+ * transmit the command.
+ */
+void dsi_ctrl_hw_14_kickoff_command(struct dsi_ctrl_hw *ctrl,
+				    struct dsi_ctrl_cmd_dma_info *cmd,
+				    u32 flags)
+{
+	u32 reg = 0;
+
+	/* Set BROADCAST_EN and EMBEDDED_MODE */
+	reg = DSI_R32(ctrl, DSI_COMMAND_MODE_DMA_CTRL);
+	if (cmd->en_broadcast)
+		reg |= BIT(31);
+	else
+		reg &= ~BIT(31);
+
+	if (cmd->is_master)
+		reg |= BIT(30);
+	else
+		reg &= ~BIT(30);
+
+	if (cmd->use_lpm)
+		reg |= BIT(26);
+	else
+		reg &= ~BIT(26);
+
+	reg |= BIT(28);
+	DSI_W32(ctrl, DSI_COMMAND_MODE_DMA_CTRL, reg);
+
+	DSI_W32(ctrl, DSI_DMA_CMD_OFFSET, cmd->offset);
+	DSI_W32(ctrl, DSI_DMA_CMD_LENGTH, (cmd->length & 0xFFFFFF));
+
+	/* wait for writes to complete before kick off */
+	wmb();
+
+	if (!(flags & DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER))
+		DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1);
+}
+
+/**
+ * kickoff_fifo_command() - transmits a command using FIFO in dsi
+ *                          hardware.
+ * @ctrl:          Pointer to the controller host hardware.
+ * @cmd:           Command information.
+ * @flags:         Modifiers for command transmission.
+ *
+ * The controller hardware FIFO is programmed with command header and
+ * payload. The transmission is kicked off if
+ * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
+ * set, caller should make a separate call to trigger_command_dma() to
+ * transmit the command.
+ */
+void dsi_ctrl_hw_14_kickoff_fifo_command(struct dsi_ctrl_hw *ctrl,
+					 struct dsi_ctrl_cmd_dma_fifo_info *cmd,
+					 u32 flags)
+{
+	u32 reg = 0, i = 0;
+	u32 *ptr = cmd->command;
+	/*
+	 * Set CMD_DMA_TPG_EN, TPG_DMA_FIFO_MODE and
+	 * CMD_DMA_PATTERN_SEL = custom pattern stored in TPG DMA FIFO
+	 */
+	reg = (BIT(1) | BIT(2) | (0x3 << 16));
+	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg);
+
+	/*
+	 * Program the FIFO with command buffer. Hardware requires an extra
+	 * DWORD (set to zero) if the length of command buffer is odd DWORDS.
+	 */
+	for (i = 0; i < cmd->size; i += 4) {
+		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL, *ptr);
+		ptr++;
+	}
+
+	if ((cmd->size / 4) & 0x1)
+		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL, 0);
+
+	/* Set BROADCAST_EN and EMBEDDED_MODE */
+	reg = DSI_R32(ctrl, DSI_COMMAND_MODE_DMA_CTRL);
+	if (cmd->en_broadcast)
+		reg |= BIT(31);
+	else
+		reg &= ~BIT(31);
+
+	if (cmd->is_master)
+		reg |= BIT(30);
+	else
+		reg &= ~BIT(30);
+
+	if (cmd->use_lpm)
+		reg |= BIT(26);
+	else
+		reg &= ~BIT(26);
+
+	reg |= BIT(28);
+
+	DSI_W32(ctrl, DSI_COMMAND_MODE_DMA_CTRL, reg);
+
+	DSI_W32(ctrl, DSI_DMA_CMD_LENGTH, (cmd->size & 0xFFFFFFFF));
+	/* Finish writes before command trigger */
+	wmb();
+
+	if (!(flags & DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER))
+		DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1);
+
+	pr_debug("[DSI_%d]size=%d, trigger = %d\n",
+		 ctrl->index, cmd->size,
+		 (flags & DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER) ? false : true);
+}
+
+/**
+ * reset_cmd_fifo() - reset the TPG DMA FIFO used by kickoff_fifo_command()
+ * @ctrl:          Pointer to the controller host hardware.
+ */
+void dsi_ctrl_hw_14_reset_cmd_fifo(struct dsi_ctrl_hw *ctrl)
+{
+	/* disable cmd dma tpg */
+	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, 0x0);
+
+	DSI_W32(ctrl, DSI_TPG_DMA_FIFO_RESET, 0x1);
+	udelay(1);
+	DSI_W32(ctrl, DSI_TPG_DMA_FIFO_RESET, 0x0);
+}
+
+/**
+ * trigger_command_dma() - trigger transmission of command buffer.
+ * @ctrl:          Pointer to the controller host hardware.
+ *
+ * This trigger can be only used if there was a prior call to
+ * kickoff_command() or kickoff_fifo_command() with
+ * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag.
+ */
+void dsi_ctrl_hw_14_trigger_command_dma(struct dsi_ctrl_hw *ctrl)
+{
+	DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1);
+	pr_debug("[DSI_%d] CMD DMA triggered\n", ctrl->index);
+}
+
+/**
+ * get_cmd_read_data() - get data read from the peripheral
+ * @ctrl:           Pointer to the controller host hardware.
+ * @rd_buf:         Buffer where data will be read into.
+ * @read_offset:    Offset of this chunk within the overall read.
+ * @total_read_len: Number of bytes to read.
+ *
+ * Return: Number of bytes read.
+ */
+u32 dsi_ctrl_hw_14_get_cmd_read_data(struct dsi_ctrl_hw *ctrl,
+				     u8 *rd_buf,
+				     u32 read_offset,
+				     u32 total_read_len)
+{
+	u32 *lp, *temp, data;
+	int i, j = 0, cnt;
+	u32 read_cnt;
+	u32 rx_byte = 0;
+	u32 repeated_bytes = 0;
+	u8 reg[16];
+	u32 pkt_size = 0;
+	int buf_offset = read_offset;
+
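+	/*
+	 * Note: rx_byte and pkt_size are hard-coded to zero here; they
+	 * need to be derived from the expected response length before
+	 * this readback path can return meaningful data.
+	 */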
+	lp = (u32 *)rd_buf;
+	temp = (u32 *)reg;
+	cnt = (rx_byte + 3) >> 2;
+
+	if (cnt > 4)
+		cnt = 4;
+
+	if (rx_byte == 4)
+		read_cnt = 4;
+	else
+		read_cnt = pkt_size + 6;
+
+	if (read_cnt > 16) {
+		int bytes_shifted;
+
+		bytes_shifted = read_cnt - 16;
+		repeated_bytes = buf_offset - bytes_shifted;
+	}
+
+	for (i = cnt - 1; i >= 0; i--) {
+		data = DSI_R32(ctrl, DSI_RDBK_DATA0 + i*4);
+		*temp++ = ntohl(data);
+	}
+
+	for (i = repeated_bytes; i < 16; i++)
+		rd_buf[j++] = reg[i];
+
+	pr_debug("[DSI_%d] Read %d bytes\n", ctrl->index, j);
+	return j;
+}
+
+/**
+ * ulps_request() - request ulps entry for specified lanes
+ * @ctrl:          Pointer to the controller host hardware.
+ * @lanes:         ORed list of lanes (enum dsi_data_lanes) which need
+ *                 to enter ULPS.
+ *
+ * Caller should check if lanes are in ULPS mode by calling
+ * get_lanes_in_ulps() operation.
+ */
+void dsi_ctrl_hw_14_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes)
+{
+	u32 reg = 0;
+
+	if (lanes & DSI_CLOCK_LANE)
+		reg = BIT(4);
+	if (lanes & DSI_DATA_LANE_0)
+		reg |= BIT(0);
+	if (lanes & DSI_DATA_LANE_1)
+		reg |= BIT(1);
+	if (lanes & DSI_DATA_LANE_2)
+		reg |= BIT(2);
+	if (lanes & DSI_DATA_LANE_3)
+		reg |= BIT(3);
+
+	DSI_W32(ctrl, DSI_LANE_CTRL, reg);
+
+	pr_debug("[DSI_%d] ULPS requested for lanes 0x%x\n", ctrl->index,
+		 lanes);
+}
+
+/**
+ * ulps_exit() - exit ULPS on specified lanes
+ * @ctrl:          Pointer to the controller host hardware.
+ * @lanes:         ORed list of lanes (enum dsi_data_lanes) which need
+ *                 to exit ULPS.
+ *
+ * Caller should check if lanes are in active mode by calling
+ * get_lanes_in_ulps() operation.
+ */
+void dsi_ctrl_hw_14_ulps_exit(struct dsi_ctrl_hw *ctrl, u32 lanes)
+{
+	u32 reg = 0;
+
+	reg = DSI_R32(ctrl, DSI_LANE_CTRL);
+	if (lanes & DSI_CLOCK_LANE)
+		reg |= BIT(12);
+	if (lanes & DSI_DATA_LANE_0)
+		reg |= BIT(8);
+	if (lanes & DSI_DATA_LANE_1)
+		reg |= BIT(9);
+	if (lanes & DSI_DATA_LANE_2)
+		reg |= BIT(10);
+	if (lanes & DSI_DATA_LANE_3)
+		reg |= BIT(11);
+
+	DSI_W32(ctrl, DSI_LANE_CTRL, reg);
+
+	pr_debug("[DSI_%d] ULPS exit request for lanes=0x%x\n",
+		 ctrl->index, lanes);
+}
+
+/**
+ * clear_ulps_request() - clear ulps request once all lanes are active
+ * @ctrl:          Pointer to controller host hardware.
+ * @lanes:         ORed list of lanes (enum dsi_data_lanes).
+ *
+ * ULPS request should be cleared after the lanes have exited ULPS.
+ */
+void dsi_ctrl_hw_14_clear_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes)
+{
+	u32 reg = 0;
+
+	reg = DSI_R32(ctrl, DSI_LANE_CTRL);
+	reg &= ~BIT(4); /* clock lane */
+	if (lanes & DSI_DATA_LANE_0)
+		reg &= ~BIT(0);
+	if (lanes & DSI_DATA_LANE_1)
+		reg &= ~BIT(1);
+	if (lanes & DSI_DATA_LANE_2)
+		reg &= ~BIT(2);
+	if (lanes & DSI_DATA_LANE_3)
+		reg &= ~BIT(3);
+
+	DSI_W32(ctrl, DSI_LANE_CTRL, reg);
+	/*
+	 * HPG recommends separate writes for clearing ULPS_REQUEST and
+	 * ULPS_EXIT.
+	 */
+	DSI_W32(ctrl, DSI_LANE_CTRL, 0x0);
+
+	pr_debug("[DSI_%d] ULPS request cleared\n", ctrl->index);
+}
+
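+/*
+ * Illustrative ULPS entry/exit flow using the helpers in this file
+ * (sketch only; a real caller bounds each poll with a timeout):
+ *
+ *	dsi_ctrl_hw_14_ulps_request(ctrl, lanes);
+ *	while ((dsi_ctrl_hw_14_get_lanes_in_ulps(ctrl) & lanes) != lanes)
+ *		udelay(100);
+ *	...
+ *	dsi_ctrl_hw_14_ulps_exit(ctrl, lanes);
+ *	while (dsi_ctrl_hw_14_get_lanes_in_ulps(ctrl) & lanes)
+ *		udelay(100);
+ *	dsi_ctrl_hw_14_clear_ulps_request(ctrl, lanes);
+ */
+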
+/**
+ * get_lanes_in_ulps() - returns the list of lanes in ULPS mode
+ * @ctrl:          Pointer to the controller host hardware.
+ *
+ * Returns an ORed list of lanes (enum dsi_data_lanes) that are in ULPS
+ * state. If 0 is returned, all the lanes are active.
+ *
+ * Return: List of lanes in ULPS state.
+ */
+u32 dsi_ctrl_hw_14_get_lanes_in_ulps(struct dsi_ctrl_hw *ctrl)
+{
+	u32 reg = 0;
+	u32 lanes = 0;
+
+	reg = DSI_R32(ctrl, DSI_LANE_STATUS);
+	if (!(reg & BIT(8)))
+		lanes |= DSI_DATA_LANE_0;
+	if (!(reg & BIT(9)))
+		lanes |= DSI_DATA_LANE_1;
+	if (!(reg & BIT(10)))
+		lanes |= DSI_DATA_LANE_2;
+	if (!(reg & BIT(11)))
+		lanes |= DSI_DATA_LANE_3;
+	if (!(reg & BIT(12)))
+		lanes |= DSI_CLOCK_LANE;
+
+	pr_debug("[DSI_%d] lanes in ulps = 0x%x\n", ctrl->index, lanes);
+	return lanes;
+}
+
+/**
+ * clamp_enable() - enable DSI clamps to keep PHY driving a stable link
+ * @ctrl:          Pointer to the controller host hardware.
+ * @lanes:         ORed list of lanes which need to be clamped.
+ * @enable_ulps:   Also request ULPS entry for the clamped lanes (asserts
+ *                 the ULPS request bits along with the clamp bits).
+ */
+void dsi_ctrl_hw_14_clamp_enable(struct dsi_ctrl_hw *ctrl,
+				 u32 lanes,
+				 bool enable_ulps)
+{
+	u32 clamp_reg = 0;
+	u32 bit_shift = 0;
+	u32 reg = 0;
+
+	/* Clamp bits for the DSI1 instance live in the upper 16 bits */
+	if (ctrl->index == 1)
+		bit_shift = 16;
+
+	if (lanes & DSI_CLOCK_LANE) {
+		clamp_reg |= BIT(9);
+		if (enable_ulps)
+			clamp_reg |= BIT(8);
+	}
+
+	if (lanes & DSI_DATA_LANE_0) {
+		clamp_reg |= BIT(7);
+		if (enable_ulps)
+			clamp_reg |= BIT(6);
+	}
+
+	if (lanes & DSI_DATA_LANE_1) {
+		clamp_reg |= BIT(5);
+		if (enable_ulps)
+			clamp_reg |= BIT(4);
+	}
+
+	if (lanes & DSI_DATA_LANE_2) {
+		clamp_reg |= BIT(3);
+		if (enable_ulps)
+			clamp_reg |= BIT(2);
+	}
+
+	if (lanes & DSI_DATA_LANE_3) {
+		clamp_reg |= BIT(1);
+		if (enable_ulps)
+			clamp_reg |= BIT(0);
+	}
+
+	clamp_reg |= BIT(15); /* Enable clamp */
+
+	reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
+	reg |= (clamp_reg << bit_shift);
+	DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
+
+	/* Enable PHY reset skip */
+	reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
+	reg |= BIT(30);
+	DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
+
+	pr_debug("[DSI_%d] Clamps enabled for lanes=0x%x\n", ctrl->index,
+		 lanes);
+}
+
+/**
+ * clamp_disable() - disable DSI clamps
+ * @ctrl:          Pointer to the controller host hardware.
+ * @lanes:         ORed list of lanes which need to have clamps released.
+ * @disable_ulps:  Also clear the ULPS request bits that were asserted
+ *                 when the clamps were enabled.
+ */
+void dsi_ctrl_hw_14_clamp_disable(struct dsi_ctrl_hw *ctrl,
+				  u32 lanes,
+				  bool disable_ulps)
+{
+	u32 clamp_reg = 0;
+	u32 bit_shift = 0;
+	u32 reg = 0;
+
+	/* Clamp bits for the DSI1 instance live in the upper 16 bits */
+	if (ctrl->index == 1)
+		bit_shift = 16;
+
+	if (lanes & DSI_CLOCK_LANE) {
+		clamp_reg |= BIT(9);
+		if (disable_ulps)
+			clamp_reg |= BIT(8);
+	}
+
+	if (lanes & DSI_DATA_LANE_0) {
+		clamp_reg |= BIT(7);
+		if (disable_ulps)
+			clamp_reg |= BIT(6);
+	}
+
+	if (lanes & DSI_DATA_LANE_1) {
+		clamp_reg |= BIT(5);
+		if (disable_ulps)
+			clamp_reg |= BIT(4);
+	}
+
+	if (lanes & DSI_DATA_LANE_2) {
+		clamp_reg |= BIT(3);
+		if (disable_ulps)
+			clamp_reg |= BIT(2);
+	}
+
+	if (lanes & DSI_DATA_LANE_3) {
+		clamp_reg |= BIT(1);
+		if (disable_ulps)
+			clamp_reg |= BIT(0);
+	}
+
+	clamp_reg |= BIT(15); /* Include the clamp enable bit in the mask */
+	clamp_reg <<= bit_shift;
+
+	/* Disable PHY reset skip */
+	reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
+	reg &= ~BIT(30);
+	DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
+
+	reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
+	reg &= ~(clamp_reg);
+	DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
+
+	pr_debug("[DSI_%d] Disable clamps for lanes=%d\n", ctrl->index, lanes);
+}
+
+/**
+ * get_interrupt_status() - returns the interrupt status
+ * @ctrl:          Pointer to the controller host hardware.
+ *
+ * Returns the ORed list of interrupts (enum dsi_status_int_type) that
+ * are active. This list does not include any error interrupts. Caller
+ * should call get_error_status for error interrupts.
+ *
+ * Return: List of active interrupts.
+ */
+u32 dsi_ctrl_hw_14_get_interrupt_status(struct dsi_ctrl_hw *ctrl)
+{
+	u32 reg = 0;
+	u32 ints = 0;
+
+	reg = DSI_R32(ctrl, DSI_INT_CTRL);
+
+	if (reg & BIT(0))
+		ints |= DSI_CMD_MODE_DMA_DONE;
+	if (reg & BIT(8))
+		ints |= DSI_CMD_FRAME_DONE;
+	if (reg & BIT(10))
+		ints |= DSI_CMD_STREAM0_FRAME_DONE;
+	if (reg & BIT(12))
+		ints |= DSI_CMD_STREAM1_FRAME_DONE;
+	if (reg & BIT(14))
+		ints |= DSI_CMD_STREAM2_FRAME_DONE;
+	if (reg & BIT(16))
+		ints |= DSI_VIDEO_MODE_FRAME_DONE;
+	if (reg & BIT(20))
+		ints |= DSI_BTA_DONE;
+	if (reg & BIT(28))
+		ints |= DSI_DYN_REFRESH_DONE;
+	if (reg & BIT(30))
+		ints |= DSI_DESKEW_DONE;
+
+	pr_debug("[DSI_%d] Interrupt status = 0x%x, INT_CTRL=0x%x\n",
+		 ctrl->index, ints, reg);
+	return ints;
+}
+
+/**
+ * clear_interrupt_status() - clears the specified interrupts
+ * @ctrl:          Pointer to the controller host hardware.
+ * @ints:          List of interrupts to be cleared.
+ */
+void dsi_ctrl_hw_14_clear_interrupt_status(struct dsi_ctrl_hw *ctrl, u32 ints)
+{
+	u32 reg = 0;
+
+	if (ints & DSI_CMD_MODE_DMA_DONE)
+		reg |= BIT(0);
+	if (ints & DSI_CMD_FRAME_DONE)
+		reg |= BIT(8);
+	if (ints & DSI_CMD_STREAM0_FRAME_DONE)
+		reg |= BIT(10);
+	if (ints & DSI_CMD_STREAM1_FRAME_DONE)
+		reg |= BIT(12);
+	if (ints & DSI_CMD_STREAM2_FRAME_DONE)
+		reg |= BIT(14);
+	if (ints & DSI_VIDEO_MODE_FRAME_DONE)
+		reg |= BIT(16);
+	if (ints & DSI_BTA_DONE)
+		reg |= BIT(20);
+	if (ints & DSI_DYN_REFRESH_DONE)
+		reg |= BIT(28);
+	if (ints & DSI_DESKEW_DONE)
+		reg |= BIT(30);
+
+	DSI_W32(ctrl, DSI_INT_CTRL, reg);
+
+	pr_debug("[DSI_%d] Clear interrupts, ints = 0x%x, INT_CTRL=0x%x\n",
+		 ctrl->index, ints, reg);
+}
+
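+/*
+ * Illustrative handler pattern (sketch; "dma_done" stands in for a
+ * caller-owned completion):
+ *
+ *	u32 ints = dsi_ctrl_hw_14_get_interrupt_status(ctrl);
+ *
+ *	if (ints & DSI_CMD_MODE_DMA_DONE)
+ *		complete(&dma_done);
+ *	dsi_ctrl_hw_14_clear_interrupt_status(ctrl, ints);
+ */
+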
+/**
+ * enable_status_interrupts() - enable the specified interrupts
+ * @ctrl:          Pointer to the controller host hardware.
+ * @ints:          List of interrupts to be enabled.
+ *
+ * Enables the specified interrupts. This list will override the
+ * previous interrupts enabled through this function. Caller has to
+ * maintain the state of the interrupts enabled. To disable all
+ * interrupts, set ints to 0.
+ */
+void dsi_ctrl_hw_14_enable_status_interrupts(struct dsi_ctrl_hw *ctrl, u32 ints)
+{
+	u32 reg = 0;
+
+	/* Do not change value of DSI_ERROR_MASK bit */
+	reg |= (DSI_R32(ctrl, DSI_INT_CTRL) & BIT(25));
+	if (ints & DSI_CMD_MODE_DMA_DONE)
+		reg |= BIT(1);
+	if (ints & DSI_CMD_FRAME_DONE)
+		reg |= BIT(9);
+	if (ints & DSI_CMD_STREAM0_FRAME_DONE)
+		reg |= BIT(11);
+	if (ints & DSI_CMD_STREAM1_FRAME_DONE)
+		reg |= BIT(13);
+	if (ints & DSI_CMD_STREAM2_FRAME_DONE)
+		reg |= BIT(15);
+	if (ints & DSI_VIDEO_MODE_FRAME_DONE)
+		reg |= BIT(17);
+	if (ints & DSI_BTA_DONE)
+		reg |= BIT(21);
+	if (ints & DSI_DYN_REFRESH_DONE)
+		reg |= BIT(29);
+	if (ints & DSI_DESKEW_DONE)
+		reg |= BIT(31);
+
+	DSI_W32(ctrl, DSI_INT_CTRL, reg);
+
+	pr_debug("[DSI_%d] Enable interrupts 0x%x, INT_CTRL=0x%x\n",
+		 ctrl->index, ints, reg);
+}
+
+/**
+ * get_error_status() - returns the error status
+ * @ctrl:          Pointer to the controller host hardware.
+ *
+ * Returns the ORed list of errors (enum dsi_error_int_type) that are
+ * active. This list does not include any status interrupts. Caller
+ * should call get_interrupt_status for status interrupts.
+ *
+ * Return: List of active error interrupts.
+ */
+u64 dsi_ctrl_hw_14_get_error_status(struct dsi_ctrl_hw *ctrl)
+{
+	u32 dln0_phy_err;
+	u32 fifo_status;
+	u32 ack_error;
+	u32 timeout_errors;
+	u32 clk_error;
+	u32 dsi_status;
+	u64 errors = 0;
+
+	dln0_phy_err = DSI_R32(ctrl, DSI_DLN0_PHY_ERR);
+	if (dln0_phy_err & BIT(0))
+		errors |= DSI_DLN0_ESC_ENTRY_ERR;
+	if (dln0_phy_err & BIT(4))
+		errors |= DSI_DLN0_ESC_SYNC_ERR;
+	if (dln0_phy_err & BIT(8))
+		errors |= DSI_DLN0_LP_CONTROL_ERR;
+	if (dln0_phy_err & BIT(12))
+		errors |= DSI_DLN0_LP0_CONTENTION;
+	if (dln0_phy_err & BIT(16))
+		errors |= DSI_DLN0_LP1_CONTENTION;
+
+	fifo_status = DSI_R32(ctrl, DSI_FIFO_STATUS);
+	if (fifo_status & BIT(7))
+		errors |= DSI_CMD_MDP_FIFO_UNDERFLOW;
+	if (fifo_status & BIT(10))
+		errors |= DSI_CMD_DMA_FIFO_UNDERFLOW;
+	if (fifo_status & BIT(18))
+		errors |= DSI_DLN0_HS_FIFO_OVERFLOW;
+	if (fifo_status & BIT(19))
+		errors |= DSI_DLN0_HS_FIFO_UNDERFLOW;
+	if (fifo_status & BIT(22))
+		errors |= DSI_DLN1_HS_FIFO_OVERFLOW;
+	if (fifo_status & BIT(23))
+		errors |= DSI_DLN1_HS_FIFO_UNDERFLOW;
+	if (fifo_status & BIT(26))
+		errors |= DSI_DLN2_HS_FIFO_OVERFLOW;
+	if (fifo_status & BIT(27))
+		errors |= DSI_DLN2_HS_FIFO_UNDERFLOW;
+	if (fifo_status & BIT(30))
+		errors |= DSI_DLN3_HS_FIFO_OVERFLOW;
+	if (fifo_status & BIT(31))
+		errors |= DSI_DLN3_HS_FIFO_UNDERFLOW;
+
+	ack_error = DSI_R32(ctrl, DSI_ACK_ERR_STATUS);
+	if (ack_error & BIT(16))
+		errors |= DSI_RDBK_SINGLE_ECC_ERR;
+	if (ack_error & BIT(17))
+		errors |= DSI_RDBK_MULTI_ECC_ERR;
+	if (ack_error & BIT(20))
+		errors |= DSI_RDBK_CRC_ERR;
+	if (ack_error & BIT(23))
+		errors |= DSI_RDBK_INCOMPLETE_PKT;
+	if (ack_error & BIT(24))
+		errors |= DSI_PERIPH_ERROR_PKT;
+
+	timeout_errors = DSI_R32(ctrl, DSI_TIMEOUT_STATUS);
+	if (timeout_errors & BIT(0))
+		errors |= DSI_HS_TX_TIMEOUT;
+	if (timeout_errors & BIT(4))
+		errors |= DSI_LP_RX_TIMEOUT;
+	if (timeout_errors & BIT(8))
+		errors |= DSI_BTA_TIMEOUT;
+
+	clk_error = DSI_R32(ctrl, DSI_CLK_STATUS);
+	if (clk_error & BIT(16))
+		errors |= DSI_PLL_UNLOCK;
+
+	dsi_status = DSI_R32(ctrl, DSI_STATUS);
+	if (dsi_status & BIT(31))
+		errors |= DSI_INTERLEAVE_OP_CONTENTION;
+
+	pr_debug("[DSI_%d] Error status = 0x%llx, phy=0x%x, fifo=0x%x",
+		 ctrl->index, errors, dln0_phy_err, fifo_status);
+	pr_debug("[DSI_%d] ack=0x%x, timeout=0x%x, clk=0x%x, dsi=0x%x\n",
+		 ctrl->index, ack_error, timeout_errors, clk_error, dsi_status);
+	return errors;
+}
+
+/**
+ * clear_error_status() - clears the specified errors
+ * @ctrl:          Pointer to the controller host hardware.
+ * @errors:        List of errors to be cleared.
+ */
+void dsi_ctrl_hw_14_clear_error_status(struct dsi_ctrl_hw *ctrl, u64 errors)
+{
+	u32 dln0_phy_err = 0;
+	u32 fifo_status = 0;
+	u32 ack_error = 0;
+	u32 timeout_error = 0;
+	u32 clk_error = 0;
+	u32 dsi_status = 0;
+	u32 int_ctrl = 0;
+
+	if (errors & DSI_RDBK_SINGLE_ECC_ERR)
+		ack_error |= BIT(16);
+	if (errors & DSI_RDBK_MULTI_ECC_ERR)
+		ack_error |= BIT(17);
+	if (errors & DSI_RDBK_CRC_ERR)
+		ack_error |= BIT(20);
+	if (errors & DSI_RDBK_INCOMPLETE_PKT)
+		ack_error |= BIT(23);
+	if (errors & DSI_PERIPH_ERROR_PKT)
+		ack_error |= BIT(24);
+
+	if (errors & DSI_LP_RX_TIMEOUT)
+		timeout_error |= BIT(4);
+	if (errors & DSI_HS_TX_TIMEOUT)
+		timeout_error |= BIT(0);
+	if (errors & DSI_BTA_TIMEOUT)
+		timeout_error |= BIT(8);
+
+	if (errors & DSI_PLL_UNLOCK)
+		clk_error |= BIT(16);
+
+	if (errors & DSI_DLN0_LP0_CONTENTION)
+		dln0_phy_err |= BIT(12);
+	if (errors & DSI_DLN0_LP1_CONTENTION)
+		dln0_phy_err |= BIT(16);
+	if (errors & DSI_DLN0_ESC_ENTRY_ERR)
+		dln0_phy_err |= BIT(0);
+	if (errors & DSI_DLN0_ESC_SYNC_ERR)
+		dln0_phy_err |= BIT(4);
+	if (errors & DSI_DLN0_LP_CONTROL_ERR)
+		dln0_phy_err |= BIT(8);
+
+	if (errors & DSI_CMD_DMA_FIFO_UNDERFLOW)
+		fifo_status |= BIT(10);
+	if (errors & DSI_CMD_MDP_FIFO_UNDERFLOW)
+		fifo_status |= BIT(7);
+	if (errors & DSI_DLN0_HS_FIFO_OVERFLOW)
+		fifo_status |= BIT(18);
+	if (errors & DSI_DLN1_HS_FIFO_OVERFLOW)
+		fifo_status |= BIT(22);
+	if (errors & DSI_DLN2_HS_FIFO_OVERFLOW)
+		fifo_status |= BIT(26);
+	if (errors & DSI_DLN3_HS_FIFO_OVERFLOW)
+		fifo_status |= BIT(30);
+	if (errors & DSI_DLN0_HS_FIFO_UNDERFLOW)
+		fifo_status |= BIT(19);
+	if (errors & DSI_DLN1_HS_FIFO_UNDERFLOW)
+		fifo_status |= BIT(23);
+	if (errors & DSI_DLN2_HS_FIFO_UNDERFLOW)
+		fifo_status |= BIT(27);
+	if (errors & DSI_DLN3_HS_FIFO_UNDERFLOW)
+		fifo_status |= BIT(31);
+
+	if (errors & DSI_INTERLEAVE_OP_CONTENTION)
+		dsi_status |= BIT(31);
+
+	DSI_W32(ctrl, DSI_DLN0_PHY_ERR, dln0_phy_err);
+	DSI_W32(ctrl, DSI_FIFO_STATUS, fifo_status);
+	DSI_W32(ctrl, DSI_ACK_ERR_STATUS, ack_error);
+	DSI_W32(ctrl, DSI_TIMEOUT_STATUS, timeout_error);
+	DSI_W32(ctrl, DSI_CLK_STATUS, clk_error);
+	DSI_W32(ctrl, DSI_STATUS, dsi_status);
+
+	int_ctrl = DSI_R32(ctrl, DSI_INT_CTRL);
+	int_ctrl |= BIT(24);
+	DSI_W32(ctrl, DSI_INT_CTRL, int_ctrl);
+	pr_debug("[DSI_%d] clear errors = 0x%llx, phy=0x%x, fifo=0x%x",
+		 ctrl->index, errors, dln0_phy_err, fifo_status);
+	pr_debug("[DSI_%d] ack=0x%x, timeout=0x%x, clk=0x%x, dsi=0x%x\n",
+		 ctrl->index, ack_error, timeout_error, clk_error, dsi_status);
+}
+
+/**
+ * enable_error_interrupts() - enable the specified interrupts
+ * @ctrl:          Pointer to the controller host hardware.
+ * @errors:        List of errors to be enabled.
+ *
+ * Enables the specified interrupts. This list will override the
+ * previous interrupts enabled through this function. Caller has to
+ * maintain the state of the interrupts enabled. To disable all
+ * interrupts, set errors to 0.
+ */
+void dsi_ctrl_hw_14_enable_error_interrupts(struct dsi_ctrl_hw *ctrl,
+					    u64 errors)
+{
+	u32 int_ctrl = 0;
+	/*
+	 * Default mask value: all supported error interrupts masked.
+	 * Bits are cleared below to unmask the requested errors.
+	 */
+	u32 int_mask0 = 0x7FFF3BFF;
+
+	int_ctrl = DSI_R32(ctrl, DSI_INT_CTRL);
+	if (errors)
+		int_ctrl |= BIT(25);
+	else
+		int_ctrl &= ~BIT(25);
+
+	if (errors & DSI_RDBK_SINGLE_ECC_ERR)
+		int_mask0 &= ~BIT(0);
+	if (errors & DSI_RDBK_MULTI_ECC_ERR)
+		int_mask0 &= ~BIT(1);
+	if (errors & DSI_RDBK_CRC_ERR)
+		int_mask0 &= ~BIT(2);
+	if (errors & DSI_RDBK_INCOMPLETE_PKT)
+		int_mask0 &= ~BIT(3);
+	if (errors & DSI_PERIPH_ERROR_PKT)
+		int_mask0 &= ~BIT(4);
+
+	if (errors & DSI_LP_RX_TIMEOUT)
+		int_mask0 &= ~BIT(5);
+	if (errors & DSI_HS_TX_TIMEOUT)
+		int_mask0 &= ~BIT(6);
+	if (errors & DSI_BTA_TIMEOUT)
+		int_mask0 &= ~BIT(7);
+
+	if (errors & DSI_PLL_UNLOCK)
+		int_mask0 &= ~BIT(28);
+
+	if (errors & DSI_DLN0_LP0_CONTENTION)
+		int_mask0 &= ~BIT(24);
+	if (errors & DSI_DLN0_LP1_CONTENTION)
+		int_mask0 &= ~BIT(25);
+	if (errors & DSI_DLN0_ESC_ENTRY_ERR)
+		int_mask0 &= ~BIT(21);
+	if (errors & DSI_DLN0_ESC_SYNC_ERR)
+		int_mask0 &= ~BIT(22);
+	if (errors & DSI_DLN0_LP_CONTROL_ERR)
+		int_mask0 &= ~BIT(23);
+
+	if (errors & DSI_CMD_DMA_FIFO_UNDERFLOW)
+		int_mask0 &= ~BIT(9);
+	if (errors & DSI_CMD_MDP_FIFO_UNDERFLOW)
+		int_mask0 &= ~BIT(11);
+	if (errors & DSI_DLN0_HS_FIFO_OVERFLOW)
+		int_mask0 &= ~BIT(16);
+	if (errors & DSI_DLN1_HS_FIFO_OVERFLOW)
+		int_mask0 &= ~BIT(17);
+	if (errors & DSI_DLN2_HS_FIFO_OVERFLOW)
+		int_mask0 &= ~BIT(18);
+	if (errors & DSI_DLN3_HS_FIFO_OVERFLOW)
+		int_mask0 &= ~BIT(19);
+	if (errors & DSI_DLN0_HS_FIFO_UNDERFLOW)
+		int_mask0 &= ~BIT(26);
+	if (errors & DSI_DLN1_HS_FIFO_UNDERFLOW)
+		int_mask0 &= ~BIT(27);
+	if (errors & DSI_DLN2_HS_FIFO_UNDERFLOW)
+		int_mask0 &= ~BIT(29);
+	if (errors & DSI_DLN3_HS_FIFO_UNDERFLOW)
+		int_mask0 &= ~BIT(30);
+
+	if (errors & DSI_INTERLEAVE_OP_CONTENTION)
+		int_mask0 &= ~BIT(8);
+
+	DSI_W32(ctrl, DSI_INT_CTRL, int_ctrl);
+	DSI_W32(ctrl, DSI_ERR_INT_MASK0, int_mask0);
+
+	pr_debug("[DSI_%d] enable errors = 0x%llx, int_mask0=0x%x\n",
+		 ctrl->index, errors, int_mask0);
+}
+
+/**
+ * video_test_pattern_setup() - setup test pattern engine for video mode
+ * @ctrl:          Pointer to the controller host hardware.
+ * @type:          Type of test pattern.
+ * @init_val:      Initial value to use for generating test pattern.
+ */
+void dsi_ctrl_hw_14_video_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
+					     enum dsi_test_pattern type,
+					     u32 init_val)
+{
+	u32 reg = 0;
+
+	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL, init_val);
+
+	switch (type) {
+	case DSI_TEST_PATTERN_FIXED:
+		reg |= (0x2 << 4);
+		break;
+	case DSI_TEST_PATTERN_INC:
+		reg |= (0x1 << 4);
+		break;
+	case DSI_TEST_PATTERN_POLY:
+		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_VIDEO_POLY, 0xF0F0F);
+		break;
+	default:
+		break;
+	}
+
+	DSI_W32(ctrl, DSI_TPG_MAIN_CONTROL, 0x100);
+	DSI_W32(ctrl, DSI_TPG_VIDEO_CONFIG, 0x5);
+	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg);
+
+	pr_debug("[DSI_%d] Video test pattern setup done\n", ctrl->index);
+}
+
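+/*
+ * Illustrative video test pattern sequence (sketch) using the helpers
+ * in this file:
+ *
+ *	dsi_ctrl_hw_14_video_test_pattern_setup(ctrl,
+ *					DSI_TEST_PATTERN_INC, 0x0);
+ *	dsi_ctrl_hw_14_test_pattern_enable(ctrl, true);
+ *	...
+ *	dsi_ctrl_hw_14_test_pattern_enable(ctrl, false);
+ */
+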
+/**
+ * cmd_test_pattern_setup() - setup test pattern engine for cmd mode
+ * @ctrl:          Pointer to the controller host hardware.
+ * @type:          Type of test pattern.
+ * @init_val:      Initial value to use for generating test pattern.
+ * @stream_id:     Stream Id on which packets are generated.
+ */
+void dsi_ctrl_hw_14_cmd_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
+					   enum dsi_test_pattern type,
+					   u32 init_val,
+					   u32 stream_id)
+{
+	u32 reg = 0;
+	u32 init_offset;
+	u32 poly_offset;
+	u32 pattern_sel_shift;
+
+	switch (stream_id) {
+	case 0:
+		init_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0;
+		poly_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM0_POLY;
+		pattern_sel_shift = 8;
+		break;
+	case 1:
+		init_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL1;
+		poly_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM1_POLY;
+		pattern_sel_shift = 12;
+		break;
+	case 2:
+		init_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL2;
+		poly_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM2_POLY;
+		pattern_sel_shift = 20;
+		break;
+	default:
+		return;
+	}
+
+	DSI_W32(ctrl, init_offset, init_val);
+
+	switch (type) {
+	case DSI_TEST_PATTERN_FIXED:
+		reg |= (0x2 << pattern_sel_shift);
+		break;
+	case DSI_TEST_PATTERN_INC:
+		reg |= (0x1 << pattern_sel_shift);
+		break;
+	case DSI_TEST_PATTERN_POLY:
+		DSI_W32(ctrl, poly_offset, 0xF0F0F);
+		break;
+	default:
+		break;
+	}
+
+	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg);
+	pr_debug("[DSI_%d] Cmd test pattern setup done\n", ctrl->index);
+}
+
+/**
+ * test_pattern_enable() - enable test pattern engine
+ * @ctrl:          Pointer to the controller host hardware.
+ * @enable:        Enable/Disable test pattern engine.
+ */
+void dsi_ctrl_hw_14_test_pattern_enable(struct dsi_ctrl_hw *ctrl,
+					bool enable)
+{
+	u32 reg = DSI_R32(ctrl, DSI_TEST_PATTERN_GEN_CTRL);
+
+	if (enable)
+		reg |= BIT(0);
+	else
+		reg &= ~BIT(0);
+
+	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg);
+
+	pr_debug("[DSI_%d] Test pattern enable=%d\n", ctrl->index, enable);
+}
+
+/**
+ * trigger_cmd_test_pattern() - trigger a command mode frame update with
+ *                              test pattern
+ * @ctrl:          Pointer to the controller host hardware.
+ * @stream_id:     Stream on which frame update is sent.
+ */
+void dsi_ctrl_hw_14_trigger_cmd_test_pattern(struct dsi_ctrl_hw *ctrl,
+					     u32 stream_id)
+{
+	switch (stream_id) {
+	case 0:
+		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER, 0x1);
+		break;
+	case 1:
+		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_STREAM1_TRIGGER, 0x1);
+		break;
+	case 2:
+		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_STREAM2_TRIGGER, 0x1);
+		break;
+	default:
+		break;
+	}
+
+	pr_debug("[DSI_%d] Cmd Test pattern trigger\n", ctrl->index);
+}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg_1_4.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg_1_4.h
new file mode 100644
index 0000000..028ad46
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg_1_4.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_CTRL_REG_H_
+#define _DSI_CTRL_REG_H_
+
+#define DSI_HW_VERSION                             (0x0000)
+#define DSI_CTRL                                   (0x0004)
+#define DSI_STATUS                                 (0x0008)
+#define DSI_FIFO_STATUS                            (0x000C)
+#define DSI_VIDEO_MODE_CTRL                        (0x0010)
+#define DSI_VIDEO_MODE_SYNC_DATATYPE               (0x0014)
+#define DSI_VIDEO_MODE_PIXEL_DATATYPE              (0x0018)
+#define DSI_VIDEO_MODE_BLANKING_DATATYPE           (0x001C)
+#define DSI_VIDEO_MODE_DATA_CTRL                   (0x0020)
+#define DSI_VIDEO_MODE_ACTIVE_H                    (0x0024)
+#define DSI_VIDEO_MODE_ACTIVE_V                    (0x0028)
+#define DSI_VIDEO_MODE_TOTAL                       (0x002C)
+#define DSI_VIDEO_MODE_HSYNC                       (0x0030)
+#define DSI_VIDEO_MODE_VSYNC                       (0x0034)
+#define DSI_VIDEO_MODE_VSYNC_VPOS                  (0x0038)
+#define DSI_COMMAND_MODE_DMA_CTRL                  (0x003C)
+#define DSI_COMMAND_MODE_MDP_CTRL                  (0x0040)
+#define DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL          (0x0044)
+#define DSI_DMA_CMD_OFFSET                         (0x0048)
+#define DSI_DMA_CMD_LENGTH                         (0x004C)
+#define DSI_DMA_FIFO_CTRL                          (0x0050)
+#define DSI_DMA_NULL_PACKET_DATA                   (0x0054)
+#define DSI_COMMAND_MODE_MDP_STREAM0_CTRL          (0x0058)
+#define DSI_COMMAND_MODE_MDP_STREAM0_TOTAL         (0x005C)
+#define DSI_COMMAND_MODE_MDP_STREAM1_CTRL          (0x0060)
+#define DSI_COMMAND_MODE_MDP_STREAM1_TOTAL         (0x0064)
+#define DSI_ACK_ERR_STATUS                         (0x0068)
+#define DSI_RDBK_DATA0                             (0x006C)
+#define DSI_RDBK_DATA1                             (0x0070)
+#define DSI_RDBK_DATA2                             (0x0074)
+#define DSI_RDBK_DATA3                             (0x0078)
+#define DSI_RDBK_DATATYPE0                         (0x007C)
+#define DSI_RDBK_DATATYPE1                         (0x0080)
+#define DSI_TRIG_CTRL                              (0x0084)
+#define DSI_EXT_MUX                                (0x0088)
+#define DSI_EXT_MUX_TE_PULSE_DETECT_CTRL           (0x008C)
+#define DSI_CMD_MODE_DMA_SW_TRIGGER                (0x0090)
+#define DSI_CMD_MODE_MDP_SW_TRIGGER                (0x0094)
+#define DSI_CMD_MODE_BTA_SW_TRIGGER                (0x0098)
+#define DSI_RESET_SW_TRIGGER                       (0x009C)
+#define DSI_MISR_CMD_CTRL                          (0x00A0)
+#define DSI_MISR_VIDEO_CTRL                        (0x00A4)
+#define DSI_LANE_STATUS                            (0x00A8)
+#define DSI_LANE_CTRL                              (0x00AC)
+#define DSI_LANE_SWAP_CTRL                         (0x00B0)
+#define DSI_DLN0_PHY_ERR                           (0x00B4)
+#define DSI_LP_TIMER_CTRL                          (0x00B8)
+#define DSI_HS_TIMER_CTRL                          (0x00BC)
+#define DSI_TIMEOUT_STATUS                         (0x00C0)
+#define DSI_CLKOUT_TIMING_CTRL                     (0x00C4)
+#define DSI_EOT_PACKET                             (0x00C8)
+#define DSI_EOT_PACKET_CTRL                        (0x00CC)
+#define DSI_GENERIC_ESC_TX_TRIGGER                 (0x00D0)
+#define DSI_CAM_BIST_CTRL                          (0x00D4)
+#define DSI_CAM_BIST_FRAME_SIZE                    (0x00D8)
+#define DSI_CAM_BIST_BLOCK_SIZE                    (0x00DC)
+#define DSI_CAM_BIST_FRAME_CONFIG                  (0x00E0)
+#define DSI_CAM_BIST_LSFR_CTRL                     (0x00E4)
+#define DSI_CAM_BIST_LSFR_INIT                     (0x00E8)
+#define DSI_CAM_BIST_START                         (0x00EC)
+#define DSI_CAM_BIST_STATUS                        (0x00F0)
+#define DSI_ERR_INT_MASK0                          (0x010C)
+#define DSI_INT_CTRL                               (0x0110)
+#define DSI_IOBIST_CTRL                            (0x0114)
+#define DSI_SOFT_RESET                             (0x0118)
+#define DSI_CLK_CTRL                               (0x011C)
+#define DSI_CLK_STATUS                             (0x0120)
+#define DSI_PHY_SW_RESET                           (0x012C)
+#define DSI_AXI2AHB_CTRL                           (0x0130)
+#define DSI_MISR_CMD_MDP0_32BIT                    (0x0134)
+#define DSI_MISR_CMD_MDP1_32BIT                    (0x0138)
+#define DSI_MISR_CMD_DMA_32BIT                     (0x013C)
+#define DSI_MISR_VIDEO_32BIT                       (0x0140)
+#define DSI_LANE_MISR_CTRL                         (0x0144)
+#define DSI_LANE0_MISR                             (0x0148)
+#define DSI_LANE1_MISR                             (0x014C)
+#define DSI_LANE2_MISR                             (0x0150)
+#define DSI_LANE3_MISR                             (0x0154)
+#define DSI_TEST_PATTERN_GEN_CTRL                  (0x015C)
+#define DSI_TEST_PATTERN_GEN_VIDEO_POLY            (0x0160)
+#define DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL        (0x0164)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM0_POLY  (0x0168)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0     (0x016C)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM1_POLY  (0x0170)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL1     (0x0174)
+#define DSI_TEST_PATTERN_GEN_CMD_DMA_POLY          (0x0178)
+#define DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL      (0x017C)
+#define DSI_TEST_PATTERN_GEN_VIDEO_ENABLE          (0x0180)
+#define DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER   (0x0184)
+#define DSI_TEST_PATTERN_GEN_CMD_STREAM1_TRIGGER   (0x0188)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL2     (0x018C)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM2_POLY  (0x0190)
+#define DSI_COMMAND_MODE_MDP_IDLE_CTRL             (0x0194)
+#define DSI_TEST_PATTERN_GEN_CMD_STREAM2_TRIGGER   (0x0198)
+#define DSI_TPG_MAIN_CONTROL                       (0x019C)
+#define DSI_TPG_MAIN_CONTROL2                      (0x01A0)
+#define DSI_TPG_VIDEO_CONFIG                       (0x01A4)
+#define DSI_TPG_COMPONENT_LIMITS                   (0x01A8)
+#define DSI_TPG_RECTANGLE                          (0x01AC)
+#define DSI_TPG_BLACK_WHITE_PATTERN_FRAMES         (0x01B0)
+#define DSI_TPG_RGB_MAPPING                        (0x01B4)
+#define DSI_COMMAND_MODE_MDP_CTRL2                 (0x01B8)
+#define DSI_COMMAND_MODE_MDP_STREAM2_CTRL          (0x01BC)
+#define DSI_COMMAND_MODE_MDP_STREAM2_TOTAL         (0x01C0)
+#define DSI_MISR_CMD_MDP2_8BIT                     (0x01C4)
+#define DSI_MISR_CMD_MDP2_32BIT                    (0x01C8)
+#define DSI_VBIF_CTRL                              (0x01CC)
+#define DSI_AES_CTRL                               (0x01D0)
+#define DSI_RDBK_DATA_CTRL                         (0x01D4)
+#define DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL2     (0x01D8)
+#define DSI_TPG_DMA_FIFO_STATUS                    (0x01DC)
+#define DSI_TPG_DMA_FIFO_WRITE_TRIGGER             (0x01E0)
+#define DSI_DSI_TIMING_FLUSH                       (0x01E4)
+#define DSI_DSI_TIMING_DB_MODE                     (0x01E8)
+#define DSI_TPG_DMA_FIFO_RESET                     (0x01EC)
+#define DSI_SCRATCH_REGISTER_0                     (0x01F0)
+#define DSI_VERSION                                (0x01F4)
+#define DSI_SCRATCH_REGISTER_1                     (0x01F8)
+#define DSI_SCRATCH_REGISTER_2                     (0x01FC)
+#define DSI_DYNAMIC_REFRESH_CTRL                   (0x0200)
+#define DSI_DYNAMIC_REFRESH_PIPE_DELAY             (0x0204)
+#define DSI_DYNAMIC_REFRESH_PIPE_DELAY2            (0x0208)
+#define DSI_DYNAMIC_REFRESH_PLL_DELAY              (0x020C)
+#define DSI_DYNAMIC_REFRESH_STATUS                 (0x0210)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL0              (0x0214)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL1              (0x0218)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL2              (0x021C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL3              (0x0220)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL4              (0x0224)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL5              (0x0228)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL6              (0x022C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL7              (0x0230)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL8              (0x0234)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL9              (0x0238)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL10             (0x023C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL11             (0x0240)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL12             (0x0244)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL13             (0x0248)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL14             (0x024C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL15             (0x0250)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL16             (0x0254)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL17             (0x0258)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL18             (0x025C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL19             (0x0260)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL20             (0x0264)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL21             (0x0268)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL22             (0x026C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL23             (0x0270)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL24             (0x0274)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL25             (0x0278)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL26             (0x027C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL27             (0x0280)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL28             (0x0284)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL29             (0x0288)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL30             (0x028C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL31             (0x0290)
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR         (0x0294)
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2        (0x0298)
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL            (0x02A0)
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL2           (0x02A4)
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL          (0x02A8)
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL2         (0x02AC)
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL3         (0x02B0)
+#define DSI_COMMAND_MODE_NULL_INSERTION_CTRL       (0x02B4)
+#define DSI_READ_BACK_DISABLE_STATUS               (0x02B8)
+#define DSI_DESKEW_CTRL                            (0x02BC)
+#define DSI_DESKEW_DELAY_CTRL                      (0x02C0)
+#define DSI_DESKEW_SW_TRIGGER                      (0x02C4)
+#define DSI_SECURE_DISPLAY_STATUS                  (0x02CC)
+#define DSI_SECURE_DISPLAY_BLOCK_COMMAND_COLOR     (0x02D0)
+#define DSI_SECURE_DISPLAY_BLOCK_VIDEO_COLOR       (0x02D4)
+
+#endif /* _DSI_CTRL_REG_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
new file mode 100644
index 0000000..ded7ed3
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
@@ -0,0 +1,357 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_DEFS_H_
+#define _DSI_DEFS_H_
+
+#include <linux/types.h>
+
+#define DSI_H_TOTAL(t) (((t)->h_active) + ((t)->h_back_porch) + \
+			((t)->h_sync_width) + ((t)->h_front_porch))
+
+#define DSI_V_TOTAL(t) (((t)->v_active) + ((t)->v_back_porch) + \
+			((t)->v_sync_width) + ((t)->v_front_porch))
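+
+/*
+ * Worked example (illustrative): with h_active = 1080, h_back_porch = 48,
+ * h_sync_width = 16 and h_front_porch = 32, DSI_H_TOTAL() evaluates to
+ * 1080 + 48 + 16 + 32 = 1176 pixels per line.
+ */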
+
+/**
+ * enum dsi_pixel_format - DSI pixel formats
+ * @DSI_PIXEL_FORMAT_RGB565:
+ * @DSI_PIXEL_FORMAT_RGB666:
+ * @DSI_PIXEL_FORMAT_RGB666_LOOSE:
+ * @DSI_PIXEL_FORMAT_RGB888:
+ * @DSI_PIXEL_FORMAT_RGB111:
+ * @DSI_PIXEL_FORMAT_RGB332:
+ * @DSI_PIXEL_FORMAT_RGB444:
+ * @DSI_PIXEL_FORMAT_MAX:
+ */
+enum dsi_pixel_format {
+	DSI_PIXEL_FORMAT_RGB565 = 0,
+	DSI_PIXEL_FORMAT_RGB666,
+	DSI_PIXEL_FORMAT_RGB666_LOOSE,
+	DSI_PIXEL_FORMAT_RGB888,
+	DSI_PIXEL_FORMAT_RGB111,
+	DSI_PIXEL_FORMAT_RGB332,
+	DSI_PIXEL_FORMAT_RGB444,
+	DSI_PIXEL_FORMAT_MAX
+};
+
+/**
+ * enum dsi_op_mode - dsi operation mode
+ * @DSI_OP_VIDEO_MODE: DSI video mode operation
+ * @DSI_OP_CMD_MODE:   DSI Command mode operation
+ * @DSI_OP_MODE_MAX:
+ */
+enum dsi_op_mode {
+	DSI_OP_VIDEO_MODE = 0,
+	DSI_OP_CMD_MODE,
+	DSI_OP_MODE_MAX
+};
+
+/**
+ * enum dsi_data_lanes - dsi physical lanes
+ * @DSI_DATA_LANE_0: Physical lane 0
+ * @DSI_DATA_LANE_1: Physical lane 1
+ * @DSI_DATA_LANE_2: Physical lane 2
+ * @DSI_DATA_LANE_3: Physical lane 3
+ * @DSI_CLOCK_LANE:  Physical clock lane
+ */
+enum dsi_data_lanes {
+	DSI_DATA_LANE_0 = BIT(0),
+	DSI_DATA_LANE_1 = BIT(1),
+	DSI_DATA_LANE_2 = BIT(2),
+	DSI_DATA_LANE_3 = BIT(3),
+	DSI_CLOCK_LANE  = BIT(4)
+};
+
+/**
+ * enum dsi_logical_lane - dsi logical lanes
+ * @DSI_LOGICAL_LANE_0:     Logical lane 0
+ * @DSI_LOGICAL_LANE_1:     Logical lane 1
+ * @DSI_LOGICAL_LANE_2:     Logical lane 2
+ * @DSI_LOGICAL_LANE_3:     Logical lane 3
+ * @DSI_LOGICAL_CLOCK_LANE: Clock lane
+ * @DSI_LANE_MAX:           Maximum lanes supported
+ */
+enum dsi_logical_lane {
+	DSI_LOGICAL_LANE_0 = 0,
+	DSI_LOGICAL_LANE_1,
+	DSI_LOGICAL_LANE_2,
+	DSI_LOGICAL_LANE_3,
+	DSI_LOGICAL_CLOCK_LANE,
+	DSI_LANE_MAX
+};
+
+/**
+ * enum dsi_trigger_type - dsi trigger type
+ * @DSI_TRIGGER_NONE:     No trigger.
+ * @DSI_TRIGGER_TE:       TE trigger.
+ * @DSI_TRIGGER_SEOF:     Start or End of frame.
+ * @DSI_TRIGGER_SW:       Software trigger.
+ * @DSI_TRIGGER_SW_SEOF:  Software trigger and start/end of frame.
+ * @DSI_TRIGGER_SW_TE:    Software and TE triggers.
+ * @DSI_TRIGGER_MAX:      Max trigger values.
+ */
+enum dsi_trigger_type {
+	DSI_TRIGGER_NONE = 0,
+	DSI_TRIGGER_TE,
+	DSI_TRIGGER_SEOF,
+	DSI_TRIGGER_SW,
+	DSI_TRIGGER_SW_SEOF,
+	DSI_TRIGGER_SW_TE,
+	DSI_TRIGGER_MAX
+};
+
+/**
+ * enum dsi_color_swap_mode - color swap mode
+ * @DSI_COLOR_SWAP_RGB:
+ * @DSI_COLOR_SWAP_RBG:
+ * @DSI_COLOR_SWAP_BGR:
+ * @DSI_COLOR_SWAP_BRG:
+ * @DSI_COLOR_SWAP_GRB:
+ * @DSI_COLOR_SWAP_GBR:
+ */
+enum dsi_color_swap_mode {
+	DSI_COLOR_SWAP_RGB = 0,
+	DSI_COLOR_SWAP_RBG,
+	DSI_COLOR_SWAP_BGR,
+	DSI_COLOR_SWAP_BRG,
+	DSI_COLOR_SWAP_GRB,
+	DSI_COLOR_SWAP_GBR
+};
+
+/**
+ * enum dsi_dfps_type - Dynamic FPS support type
+ * @DSI_DFPS_NONE:           Dynamic FPS is not supported.
+ * @DSI_DFPS_SUSPEND_RESUME:
+ * @DSI_DFPS_IMMEDIATE_CLK:
+ * @DSI_DFPS_IMMEDIATE_HFP:
+ * @DSI_DFPS_IMMEDIATE_VFP:
+ * @DSI_DFPS_MAX:
+ */
+enum dsi_dfps_type {
+	DSI_DFPS_NONE = 0,
+	DSI_DFPS_SUSPEND_RESUME,
+	DSI_DFPS_IMMEDIATE_CLK,
+	DSI_DFPS_IMMEDIATE_HFP,
+	DSI_DFPS_IMMEDIATE_VFP,
+	DSI_DFPS_MAX
+};
+
+/**
+ * enum dsi_phy_type - DSI phy types
+ * @DSI_PHY_TYPE_DPHY:
+ * @DSI_PHY_TYPE_CPHY:
+ */
+enum dsi_phy_type {
+	DSI_PHY_TYPE_DPHY,
+	DSI_PHY_TYPE_CPHY
+};
+
+/**
+ * enum dsi_te_mode - dsi te source
+ * @DSI_TE_ON_DATA_LINK:    TE read from DSI link
+ * @DSI_TE_ON_EXT_PIN:      TE signal on an external GPIO
+ */
+enum dsi_te_mode {
+	DSI_TE_ON_DATA_LINK = 0,
+	DSI_TE_ON_EXT_PIN,
+};
+
+/**
+ * enum dsi_video_traffic_mode - video mode pixel transmission type
+ * @DSI_VIDEO_TRAFFIC_SYNC_PULSES:       Non-burst mode with sync pulses.
+ * @DSI_VIDEO_TRAFFIC_SYNC_START_EVENTS: Non-burst mode with sync start events.
+ * @DSI_VIDEO_TRAFFIC_BURST_MODE:        Burst mode using sync start events.
+ */
+enum dsi_video_traffic_mode {
+	DSI_VIDEO_TRAFFIC_SYNC_PULSES = 0,
+	DSI_VIDEO_TRAFFIC_SYNC_START_EVENTS,
+	DSI_VIDEO_TRAFFIC_BURST_MODE,
+};
+
+/**
+ * struct dsi_mode_info - video mode information dsi frame
+ * @h_active:         Active width of one frame in pixels.
+ * @h_back_porch:     Horizontal back porch in pixels.
+ * @h_sync_width:     HSYNC width in pixels.
+ * @h_front_porch:    Horizontal front porch in pixels.
+ * @h_skew:
+ * @h_sync_polarity:  Polarity of HSYNC (false is active low).
+ * @v_active:         Active height of one frame in lines.
+ * @v_back_porch:     Vertical back porch in lines.
+ * @v_sync_width:     VSYNC width in lines.
+ * @v_front_porch:    Vertical front porch in lines.
+ * @v_sync_polarity:  Polarity of VSYNC (false is active low).
+ * @refresh_rate:     Refresh rate in Hz.
+ */
+struct dsi_mode_info {
+	u32 h_active;
+	u32 h_back_porch;
+	u32 h_sync_width;
+	u32 h_front_porch;
+	u32 h_skew;
+	bool h_sync_polarity;
+
+	u32 v_active;
+	u32 v_back_porch;
+	u32 v_sync_width;
+	u32 v_front_porch;
+	bool v_sync_polarity;
+
+	u32 refresh_rate;
+};
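+
+/*
+ * Example (illustrative values): a 1080x1920 60 Hz panel timing could be
+ * described as:
+ *
+ *	static const struct dsi_mode_info mode = {
+ *		.h_active = 1080, .h_front_porch = 32,
+ *		.h_back_porch = 48, .h_sync_width = 16,
+ *		.v_active = 1920, .v_front_porch = 8,
+ *		.v_back_porch = 12, .v_sync_width = 4,
+ *		.refresh_rate = 60,
+ *	};
+ */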
+
+/**
+ * struct dsi_lane_mapping - Mapping between DSI logical and physical lanes
+ * @physical_lane0:   Logical lane to which physical lane 0 is mapped.
+ * @physical_lane1:   Logical lane to which physical lane 1 is mapped.
+ * @physical_lane2:   Logical lane to which physical lane 2 is mapped.
+ * @physical_lane3:   Logical lane to which physical lane 3 is mapped.
+ */
+struct dsi_lane_mapping {
+	enum dsi_logical_lane physical_lane0;
+	enum dsi_logical_lane physical_lane1;
+	enum dsi_logical_lane physical_lane2;
+	enum dsi_logical_lane physical_lane3;
+};
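+
+/*
+ * Example (illustrative): an identity mapping, where each physical lane
+ * carries the logical lane of the same index:
+ *
+ *	struct dsi_lane_mapping map = {
+ *		.physical_lane0 = DSI_LOGICAL_LANE_0,
+ *		.physical_lane1 = DSI_LOGICAL_LANE_1,
+ *		.physical_lane2 = DSI_LOGICAL_LANE_2,
+ *		.physical_lane3 = DSI_LOGICAL_LANE_3,
+ *	};
+ */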
+
+/**
+ * struct dsi_host_common_cfg - Host configuration common to video and cmd mode
+ * @dst_format:          Destination pixel format.
+ * @data_lanes:          Physical data lanes to be enabled.
+ * @en_crc_check:        Enable CRC checks.
+ * @en_ecc_check:        Enable ECC checks.
+ * @te_mode:             Source for TE signalling.
+ * @mdp_cmd_trigger:     MDP frame update trigger for command mode.
+ * @dma_cmd_trigger:     Command DMA trigger.
+ * @cmd_trigger_stream:  Command mode stream to trigger.
+ * @swap_mode:           Color component swap mode.
+ * @bit_swap_red:        Is red color bit swapped.
+ * @bit_swap_green:      Is green color bit swapped.
+ * @bit_swap_blue:       Is blue color bit swapped.
+ * @t_clk_post:          Number of byte clock cycles that the transmitter shall
+ *                       continue sending after last data lane has transitioned
+ *                       to LP mode.
+ * @t_clk_pre:           Number of byte clock cycles that the high speed clock
+ *                       shall be driven prior to data lane transitions from LP
+ *                       to HS mode.
+ * @ignore_rx_eot:       Ignore Rx EOT packets if set to true.
+ * @append_tx_eot:       Append EOT packets for forward transmissions if set to
+ *                       true.
+ */
+struct dsi_host_common_cfg {
+	enum dsi_pixel_format dst_format;
+	enum dsi_data_lanes data_lanes;
+	bool en_crc_check;
+	bool en_ecc_check;
+	enum dsi_te_mode te_mode;
+	enum dsi_trigger_type mdp_cmd_trigger;
+	enum dsi_trigger_type dma_cmd_trigger;
+	u32 cmd_trigger_stream;
+	enum dsi_color_swap_mode swap_mode;
+	bool bit_swap_red;
+	bool bit_swap_green;
+	bool bit_swap_blue;
+	u32 t_clk_post;
+	u32 t_clk_pre;
+	bool ignore_rx_eot;
+	bool append_tx_eot;
+};
+
+/**
+ * struct dsi_video_engine_cfg - DSI video engine configuration
+ * @last_line_interleave_en:   Allow command mode op interleaved on last line of
+ *                             video stream.
+ * @pulse_mode_hsa_he:         Send HSA and HE following VS/VE packet if set to
+ *                             true.
+ * @hfp_lp11_en:               Enter low power stop mode (LP-11) during HFP.
+ * @hbp_lp11_en:               Enter low power stop mode (LP-11) during HBP.
+ * @hsa_lp11_en:               Enter low power stop mode (LP-11) during HSA.
+ * @eof_bllp_lp11_en:          Enter low power stop mode (LP-11) during BLLP of
+ *                             last line of a frame.
+ * @bllp_lp11_en:              Enter low power stop mode (LP-11) during BLLP.
+ * @traffic_mode:              Traffic mode for video stream.
+ * @vc_id:                     Virtual channel identifier.
+ */
+struct dsi_video_engine_cfg {
+	bool last_line_interleave_en;
+	bool pulse_mode_hsa_he;
+	bool hfp_lp11_en;
+	bool hbp_lp11_en;
+	bool hsa_lp11_en;
+	bool eof_bllp_lp11_en;
+	bool bllp_lp11_en;
+	enum dsi_video_traffic_mode traffic_mode;
+	u32 vc_id;
+};
+
+/**
+ * struct dsi_cmd_engine_cfg - DSI command engine configuration
+ * @max_cmd_packets_interleave:    Maximum number of command mode RGB packets
+ *                                 to send within one horizontal blanking
+ *                                 period of the video mode frame.
+ *                                 of the video mode frame.
+ * @wr_mem_start:                  DCS command for write_memory_start.
+ * @wr_mem_continue:               DCS command for write_memory_continue.
+ * @insert_dcs_command:            Insert DCS command as first byte of payload
+ *                                 of the pixel data.
+ */
+struct dsi_cmd_engine_cfg {
+	u32 max_cmd_packets_interleave;
+	u32 wr_mem_start;
+	u32 wr_mem_continue;
+	bool insert_dcs_command;
+};
+
+/**
+ * struct dsi_host_config - DSI host configuration parameters.
+ * @panel_mode:            Operation mode for panel (video or cmd mode).
+ * @common_config:         Host configuration common to both Video and Cmd mode.
+ * @video_engine:          Video engine configuration if panel is in video mode.
+ * @cmd_engine:            Cmd engine configuration if panel is in cmd mode.
+ * @esc_clk_rate_hz:       Esc clock frequency in Hz.
+ * @bit_clk_rate_hz:       Bit clock frequency in Hz.
+ * @video_timing:          Video timing information of a frame.
+ * @lane_map:              Mapping between logical and physical lanes.
+ */
+struct dsi_host_config {
+	enum dsi_op_mode panel_mode;
+	struct dsi_host_common_cfg common_config;
+	union {
+		struct dsi_video_engine_cfg video_engine;
+		struct dsi_cmd_engine_cfg cmd_engine;
+	} u;
+	u64 esc_clk_rate_hz;
+	u64 bit_clk_rate_hz;
+	struct dsi_mode_info video_timing;
+	struct dsi_lane_mapping lane_map;
+};
+
+/**
+ * struct dsi_display_mode - specifies mode for dsi display
+ * @timing:         Timing parameters for the panel.
+ * @pixel_clk_khz:  Pixel clock in Khz.
+ * @panel_mode:     Panel operation mode.
+ * @flags:          Additional flags.
+ */
+struct dsi_display_mode {
+	struct dsi_mode_info timing;
+	u32 pixel_clk_khz;
+	enum dsi_op_mode panel_mode;
+
+	u32 flags;
+};
+
+#endif /* _DSI_DEFS_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
new file mode 100644
index 0000000..01535c0
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_HW_H_
+#define _DSI_HW_H_
+#include <linux/io.h>
+
+#define DSI_R32(dsi_hw, off) readl_relaxed((dsi_hw)->base + (off))
+#define DSI_W32(dsi_hw, off, val) \
+	do {\
+		pr_debug("[DSI_%d][%s] - [0x%08x]\n", \
+			(dsi_hw)->index, #off, val); \
+		writel_relaxed((val), (dsi_hw)->base + (off)); \
+	} while (0)
+
+#define DSI_MMSS_MISC_R32(dsi_hw, off) \
+	readl_relaxed((dsi_hw)->mmss_misc_base + (off))
+#define DSI_MMSS_MISC_W32(dsi_hw, off, val) \
+	do {\
+		pr_debug("[DSI_%d][%s] - [0x%08x]\n", \
+			(dsi_hw)->index, #off, val); \
+		writel_relaxed((val), (dsi_hw)->mmss_misc_base + (off)); \
+	} while (0)
+
+#define DSI_R64(dsi_hw, off) readq_relaxed((dsi_hw)->base + (off))
+#define DSI_W64(dsi_hw, off, val) writeq_relaxed((val), (dsi_hw)->base + (off))
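+
+/*
+ * Usage sketch (illustrative, assuming an initialized dsi_hw object named
+ * ctrl and the register map from dsi_ctrl_reg_1_4.h):
+ *
+ *	u32 ver = DSI_R32(ctrl, DSI_HW_VERSION);
+ *
+ *	DSI_W32(ctrl, DSI_SOFT_RESET, 0x1);
+ */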
+
+#endif /* _DSI_HW_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
new file mode 100644
index 0000000..5edfd5e
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DSI_PHY_HW_H_
+#define _DSI_PHY_HW_H_
+
+#include "dsi_defs.h"
+
+#define DSI_MAX_SETTINGS 8
+
+/**
+ * enum dsi_phy_version - DSI PHY version enumeration
+ * @DSI_PHY_VERSION_UNKNOWN:    Unknown version.
+ * @DSI_PHY_VERSION_1_0:        28nm-HPM.
+ * @DSI_PHY_VERSION_2_0:        28nm-LPM.
+ * @DSI_PHY_VERSION_3_0:        20nm.
+ * @DSI_PHY_VERSION_4_0:        14nm.
+ * @DSI_PHY_VERSION_MAX:
+ */
+enum dsi_phy_version {
+	DSI_PHY_VERSION_UNKNOWN,
+	DSI_PHY_VERSION_1_0, /* 28nm-HPM */
+	DSI_PHY_VERSION_2_0, /* 28nm-LPM */
+	DSI_PHY_VERSION_3_0, /* 20nm */
+	DSI_PHY_VERSION_4_0, /* 14nm */
+	DSI_PHY_VERSION_MAX
+};
+
+/**
+ * enum dsi_phy_hw_features - features supported by DSI PHY hardware
+ * @DSI_PHY_DPHY:        Supports DPHY
+ * @DSI_PHY_CPHY:        Supports CPHY
+ * @DSI_PHY_MAX_FEATURES: Maximum number of PHY features
+ */
+enum dsi_phy_hw_features {
+	DSI_PHY_DPHY,
+	DSI_PHY_CPHY,
+	DSI_PHY_MAX_FEATURES
+};
+
+/**
+ * enum dsi_phy_pll_source - pll clock source for PHY.
+ * @DSI_PLL_SOURCE_STANDALONE:    Clock is sourced from native PLL and is not
+ *				  shared by other PHYs.
+ * @DSI_PLL_SOURCE_NATIVE:        Clock is sourced from native PLL and is
+ *				  shared by other PHYs.
+ * @DSI_PLL_SOURCE_NON_NATIVE:    Clock is sourced from other PHYs.
+ * @DSI_PLL_SOURCE_MAX:
+ */
+enum dsi_phy_pll_source {
+	DSI_PLL_SOURCE_STANDALONE = 0,
+	DSI_PLL_SOURCE_NATIVE,
+	DSI_PLL_SOURCE_NON_NATIVE,
+	DSI_PLL_SOURCE_MAX
+};
+
+/**
+ * struct dsi_phy_per_lane_cfgs - Holds register values for PHY parameters
+ * @lane:           A set of up to DSI_MAX_SETTINGS values for each lane.
+ * @count_per_lane: Number of values per each lane.
+ */
+struct dsi_phy_per_lane_cfgs {
+	u8 lane[DSI_LANE_MAX][DSI_MAX_SETTINGS];
+	u32 count_per_lane;
+};
+
+/**
+ * struct dsi_phy_cfg - DSI PHY configuration
+ * @lanecfg:          Lane configuration settings.
+ * @strength:         Strength settings for lanes.
+ * @timing:           Timing parameters for lanes.
+ * @regulators:       Regulator settings for lanes.
+ * @pll_source:       PLL source.
+ */
+struct dsi_phy_cfg {
+	struct dsi_phy_per_lane_cfgs lanecfg;
+	struct dsi_phy_per_lane_cfgs strength;
+	struct dsi_phy_per_lane_cfgs timing;
+	struct dsi_phy_per_lane_cfgs regulators;
+	enum dsi_phy_pll_source pll_source;
+};
+
+struct dsi_phy_hw;
+
+/**
+ * struct dsi_phy_hw_ops - Operations for DSI PHY hardware.
+ * @regulator_enable:          Enable PHY regulators.
+ * @regulator_disable:         Disable PHY regulators.
+ * @enable:                    Enable PHY.
+ * @disable:                   Disable PHY.
+ * @calculate_timing_params:   Calculate PHY timing params from mode information
+ */
+struct dsi_phy_hw_ops {
+	/**
+	 * regulator_enable() - enable regulators for DSI PHY
+	 * @phy:      Pointer to DSI PHY hardware object.
+	 * @reg_cfg:  Regulator configuration for all DSI lanes.
+	 */
+	void (*regulator_enable)(struct dsi_phy_hw *phy,
+				 struct dsi_phy_per_lane_cfgs *reg_cfg);
+
+	/**
+	 * regulator_disable() - disable regulators
+	 * @phy:      Pointer to DSI PHY hardware object.
+	 */
+	void (*regulator_disable)(struct dsi_phy_hw *phy);
+
+	/**
+	 * enable() - Enable PHY hardware
+	 * @phy:      Pointer to DSI PHY hardware object.
+	 * @cfg:      Per lane configurations for timing, strength and lane
+	 *	      configurations.
+	 */
+	void (*enable)(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
+
+	/**
+	 * disable() - Disable PHY hardware
+	 * @phy:      Pointer to DSI PHY hardware object.
+	 */
+	void (*disable)(struct dsi_phy_hw *phy);
+
+	/**
+	 * calculate_timing_params() - calculates timing parameters.
+	 * @phy:      Pointer to DSI PHY hardware object.
+	 * @mode:     Mode information for which timing has to be calculated.
+	 * @config:   DSI host configuration for this mode.
+	 * @timing:   Timing parameters for each lane which will be returned.
+	 */
+	int (*calculate_timing_params)(struct dsi_phy_hw *phy,
+				       struct dsi_mode_info *mode,
+				       struct dsi_host_common_cfg *config,
+				       struct dsi_phy_per_lane_cfgs *timing);
+};
+
+/**
+ * struct dsi_phy_hw - DSI phy hardware object specific to an instance
+ * @base:                  VA for the DSI PHY base address.
+ * @length:                Length of the DSI PHY register base map.
+ * @index:                 Instance ID of the controller.
+ * @version:               DSI PHY version.
+ * @feature_map:           Features supported by DSI PHY.
+ * @ops:                   Function pointer to PHY operations.
+ */
+struct dsi_phy_hw {
+	void __iomem *base;
+	u32 length;
+	u32 index;
+
+	enum dsi_phy_version version;
+
+	DECLARE_BITMAP(feature_map, DSI_PHY_MAX_FEATURES);
+	struct dsi_phy_hw_ops ops;
+};
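+
+/*
+ * Wiring sketch (illustrative): a catalog function for PHY v4.0 might
+ * populate the ops table with the matching implementations:
+ *
+ *	phy->ops.regulator_enable = dsi_phy_hw_v4_0_regulator_enable;
+ *	phy->ops.regulator_disable = dsi_phy_hw_v4_0_regulator_disable;
+ *	phy->ops.enable = dsi_phy_hw_v4_0_enable;
+ *	phy->ops.disable = dsi_phy_hw_v4_0_disable;
+ *	phy->ops.calculate_timing_params =
+ *			dsi_phy_hw_v4_0_calculate_timing_params;
+ */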
+
+#endif /* _DSI_PHY_HW_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c
new file mode 100644
index 0000000..512352d
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c
@@ -0,0 +1,858 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "dsi-phy-hw:" fmt
+#include <linux/math64.h>
+#include <linux/delay.h>
+#include "dsi_hw.h"
+#include "dsi_phy_hw.h"
+
+#define DSIPHY_CMN_REVISION_ID0                   0x0000
+#define DSIPHY_CMN_REVISION_ID1                   0x0004
+#define DSIPHY_CMN_REVISION_ID2                   0x0008
+#define DSIPHY_CMN_REVISION_ID3                   0x000C
+#define DSIPHY_CMN_CLK_CFG0                       0x0010
+#define DSIPHY_CMN_CLK_CFG1                       0x0014
+#define DSIPHY_CMN_GLBL_TEST_CTRL                 0x0018
+#define DSIPHY_CMN_CTRL_0                         0x001C
+#define DSIPHY_CMN_CTRL_1                         0x0020
+#define DSIPHY_CMN_CAL_HW_TRIGGER                 0x0024
+#define DSIPHY_CMN_CAL_SW_CFG0                    0x0028
+#define DSIPHY_CMN_CAL_SW_CFG1                    0x002C
+#define DSIPHY_CMN_CAL_SW_CFG2                    0x0030
+#define DSIPHY_CMN_CAL_HW_CFG0                    0x0034
+#define DSIPHY_CMN_CAL_HW_CFG1                    0x0038
+#define DSIPHY_CMN_CAL_HW_CFG2                    0x003C
+#define DSIPHY_CMN_CAL_HW_CFG3                    0x0040
+#define DSIPHY_CMN_CAL_HW_CFG4                    0x0044
+#define DSIPHY_CMN_PLL_CNTRL                      0x0048
+#define DSIPHY_CMN_LDO_CNTRL                      0x004C
+
+#define DSIPHY_CMN_REGULATOR_CAL_STATUS0          0x0064
+#define DSIPHY_CMN_REGULATOR_CAL_STATUS1          0x0068
+
+/* n = 0..3 for data lanes and n = 4 for clock lane */
+#define DSIPHY_DLNX_CFG0(n)                     (0x100 + ((n) * 0x80))
+#define DSIPHY_DLNX_CFG1(n)                     (0x104 + ((n) * 0x80))
+#define DSIPHY_DLNX_CFG2(n)                     (0x108 + ((n) * 0x80))
+#define DSIPHY_DLNX_CFG3(n)                     (0x10C + ((n) * 0x80))
+#define DSIPHY_DLNX_TEST_DATAPATH(n)            (0x110 + ((n) * 0x80))
+#define DSIPHY_DLNX_TEST_STR(n)                 (0x114 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_4(n)            (0x118 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_5(n)            (0x11C + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_6(n)            (0x120 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_7(n)            (0x124 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_8(n)            (0x128 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_9(n)            (0x12C + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_10(n)           (0x130 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_11(n)           (0x134 + ((n) * 0x80))
+#define DSIPHY_DLNX_STRENGTH_CTRL_0(n)          (0x138 + ((n) * 0x80))
+#define DSIPHY_DLNX_STRENGTH_CTRL_1(n)          (0x13C + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_POLY(n)                (0x140 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_SEED0(n)               (0x144 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_SEED1(n)               (0x148 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_HEAD(n)                (0x14C + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_SOT(n)                 (0x150 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_CTRL0(n)               (0x154 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_CTRL1(n)               (0x158 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_CTRL2(n)               (0x15C + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_CTRL3(n)               (0x160 + ((n) * 0x80))
+#define DSIPHY_DLNX_VREG_CNTRL(n)               (0x164 + ((n) * 0x80))
+#define DSIPHY_DLNX_HSTX_STR_STATUS(n)          (0x168 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_STATUS0(n)             (0x16C + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_STATUS1(n)             (0x170 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_STATUS2(n)             (0x174 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_STATUS3(n)             (0x178 + ((n) * 0x80))
+#define DSIPHY_DLNX_MISR_STATUS(n)              (0x17C + ((n) * 0x80))
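+
+/*
+ * Example (illustrative): DSIPHY_DLNX_CFG0(4) addresses the clock lane
+ * and expands to 0x100 + (4 * 0x80) = 0x300.
+ */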
+
+#define DSIPHY_PLL_CLKBUFLR_EN                  0x041C
+#define DSIPHY_PLL_PLL_BANDGAP                  0x0508
+
+/**
+ * struct timing_entry - Calculated values for each timing parameter.
+ * @mipi_min:
+ * @mipi_max:
+ * @rec_min:
+ * @rec_max:
+ * @rec:
+ * @reg_value:       Value to be programmed in register.
+ */
+struct timing_entry {
+	s32 mipi_min;
+	s32 mipi_max;
+	s32 rec_min;
+	s32 rec_max;
+	s32 rec;
+	u8 reg_value;
+};
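+
+/*
+ * Note: each timing register is 8 bits wide, which is why the calc_*
+ * helpers below reject any recommended value with bits set above 0xFF
+ * (t->rec & 0xffffff00).
+ */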
+
+/**
+ * struct phy_timing_desc - Timing parameters for DSI PHY.
+ */
+struct phy_timing_desc {
+	struct timing_entry clk_prepare;
+	struct timing_entry clk_zero;
+	struct timing_entry clk_trail;
+	struct timing_entry hs_prepare;
+	struct timing_entry hs_zero;
+	struct timing_entry hs_trail;
+	struct timing_entry hs_rqst;
+	struct timing_entry hs_rqst_clk;
+	struct timing_entry hs_exit;
+	struct timing_entry ta_go;
+	struct timing_entry ta_sure;
+	struct timing_entry ta_set;
+	struct timing_entry clk_post;
+	struct timing_entry clk_pre;
+};
+
+/**
+ * struct phy_clk_params - Clock parameters for PHY timing calculations.
+ */
+struct phy_clk_params {
+	u32 bitclk_mbps;
+	u32 escclk_numer;
+	u32 escclk_denom;
+	u32 tlpx_numer_ns;
+	u32 treot_ns;
+};
+
+/**
+ * regulator_enable() - enable regulators for DSI PHY
+ * @phy:      Pointer to DSI PHY hardware object.
+ * @reg_cfg:  Regulator configuration for all DSI lanes.
+ */
+void dsi_phy_hw_v4_0_regulator_enable(struct dsi_phy_hw *phy,
+				      struct dsi_phy_per_lane_cfgs *reg_cfg)
+{
+	int i;
+
+	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++)
+		DSI_W32(phy, DSIPHY_DLNX_VREG_CNTRL(i), reg_cfg->lane[i][0]);
+
+	/* make sure all values are written to hardware */
+	wmb();
+
+	pr_debug("[DSI_%d] Phy regulators enabled\n", phy->index);
+}
+
+/**
+ * regulator_disable() - disable regulators
+ * @phy:      Pointer to DSI PHY hardware object.
+ */
+void dsi_phy_hw_v4_0_regulator_disable(struct dsi_phy_hw *phy)
+{
+	pr_debug("[DSI_%d] Phy regulators disabled\n", phy->index);
+}
+
+/**
+ * enable() - Enable PHY hardware
+ * @phy:      Pointer to DSI PHY hardware object.
+ * @cfg:      Per lane configurations for timing, strength and lane
+ *	      configurations.
+ */
+void dsi_phy_hw_v4_0_enable(struct dsi_phy_hw *phy,
+			    struct dsi_phy_cfg *cfg)
+{
+	int i;
+	struct dsi_phy_per_lane_cfgs *timing = &cfg->timing;
+	u32 data;
+
+	DSI_W32(phy, DSIPHY_CMN_LDO_CNTRL, 0x1C);
+
+	DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, 0x1);
+	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
+
+		DSI_W32(phy, DSIPHY_DLNX_CFG0(i), cfg->lanecfg.lane[i][0]);
+		DSI_W32(phy, DSIPHY_DLNX_CFG1(i), cfg->lanecfg.lane[i][1]);
+		DSI_W32(phy, DSIPHY_DLNX_CFG2(i), cfg->lanecfg.lane[i][2]);
+		DSI_W32(phy, DSIPHY_DLNX_CFG3(i), cfg->lanecfg.lane[i][3]);
+
+		DSI_W32(phy, DSIPHY_DLNX_TEST_STR(i), 0x88);
+
+		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_4(i), timing->lane[i][0]);
+		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_5(i), timing->lane[i][1]);
+		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_6(i), timing->lane[i][2]);
+		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_7(i), timing->lane[i][3]);
+		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_8(i), timing->lane[i][4]);
+		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_9(i), timing->lane[i][5]);
+		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_10(i), timing->lane[i][6]);
+		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_11(i), timing->lane[i][7]);
+
+		DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL_0(i),
+			cfg->strength.lane[i][0]);
+		DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL_1(i),
+			cfg->strength.lane[i][1]);
+	}
+
+	/* make sure all values are written to hardware before enabling phy */
+	wmb();
+
+	DSI_W32(phy, DSIPHY_CMN_CTRL_1, 0x80);
+	udelay(100);
+	DSI_W32(phy, DSIPHY_CMN_CTRL_1, 0x00);
+
+	data = DSI_R32(phy, DSIPHY_CMN_GLBL_TEST_CTRL);
+
+	switch (cfg->pll_source) {
+	case DSI_PLL_SOURCE_STANDALONE:
+		DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x01);
+		data &= ~BIT(2);
+		break;
+	case DSI_PLL_SOURCE_NATIVE:
+		DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x03);
+		data &= ~BIT(2);
+		break;
+	case DSI_PLL_SOURCE_NON_NATIVE:
+		DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x00);
+		data |= BIT(2);
+		break;
+	default:
+		break;
+	}
+
+	DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, data);
+
+	/* Enable bias current for pll1 during split display case */
+	if (cfg->pll_source == DSI_PLL_SOURCE_NON_NATIVE)
+		DSI_W32(phy, DSIPHY_PLL_PLL_BANDGAP, 0x3);
+
+	pr_debug("[DSI_%d]Phy enabled ", phy->index);
+}
+
+/**
+ * disable() - Disable PHY hardware
+ * @phy:      Pointer to DSI PHY hardware object.
+ */
+void dsi_phy_hw_v4_0_disable(struct dsi_phy_hw *phy)
+{
+	DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0);
+	DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, 0);
+	DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0);
+	pr_debug("[DSI_%d]Phy disabled ", phy->index);
+}
+
+static const u32 bits_per_pixel[DSI_PIXEL_FORMAT_MAX] = {
+	16, 18, 18, 24, 3, 8, 12 };
+
+/**
+ * calc_clk_prepare - calculates prepare timing params for clk lane.
+ */
+static int calc_clk_prepare(struct phy_clk_params *clk_params,
+			    struct phy_timing_desc *desc,
+			    s32 *actual_frac,
+			    s64 *actual_intermediate)
+{
+	u32 const min_prepare_frac = 50;
+	u64 const multiplier = BIT(20);
+
+	struct timing_entry *t = &desc->clk_prepare;
+	int rc = 0;
+	u64 dividend, temp, temp_multiple;
+	s32 frac = 0;
+	s64 intermediate;
+	s64 clk_prep_actual;
+
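+	/*
+	 * Pick a recommended value min_prepare_frac percent (50%) of the
+	 * way between rec_min and rec_max.
+	 */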
+	dividend = ((t->rec_max - t->rec_min) * min_prepare_frac * multiplier);
+	temp  = roundup(div_s64(dividend, 100), multiplier);
+	temp += (t->rec_min * multiplier);
+	t->rec = div_s64(temp, multiplier);
+
+	if (t->rec & 0xffffff00) {
+		pr_err("Incorrect rec valuefor clk_prepare\n");
+		rc = -EINVAL;
+	} else {
+		t->reg_value = t->rec;
+	}
+
+	/* calculate theoretical value */
+	temp_multiple = 8 * t->reg_value * clk_params->tlpx_numer_ns
+			 * multiplier;
+	intermediate = div_s64(temp_multiple, clk_params->bitclk_mbps);
+	div_s64_rem(temp_multiple, clk_params->bitclk_mbps, &frac);
+	clk_prep_actual = div_s64((intermediate + frac), multiplier);
+
+	pr_debug("CLK_PREPARE:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d",
+		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max);
+	pr_debug(" reg_value=%d, actual=%lld\n", t->reg_value, clk_prep_actual);
+
+	*actual_frac = frac;
+	*actual_intermediate = intermediate;
+
+	return rc;
+}
+
+/**
+ * calc_clk_zero - calculates zero timing params for clk lane.
+ */
+static int calc_clk_zero(struct phy_clk_params *clk_params,
+			 struct phy_timing_desc *desc,
+			 s32 actual_frac,
+			 s64 actual_intermediate)
+{
+	u32 const clk_zero_min_frac = 2;
+	u64 const multiplier = BIT(20);
+
+	int rc = 0;
+	struct timing_entry *t = &desc->clk_zero;
+	s64 mipi_min, rec_temp1, rec_temp2, rec_temp3, rec_min;
+
+	mipi_min = ((300 * multiplier) - (actual_intermediate + actual_frac));
+	t->mipi_min = div_s64(mipi_min, multiplier);
+
+	rec_temp1 = div_s64((mipi_min * clk_params->bitclk_mbps),
+			    clk_params->tlpx_numer_ns);
+	rec_temp2 = (rec_temp1 - (11 * multiplier));
+	rec_temp3 = roundup(div_s64(rec_temp2, 8), multiplier);
+	rec_min = (div_s64(rec_temp3, multiplier) - 3);
+	t->rec_min = rec_min;
+	t->rec_max = ((t->rec_min > 255) ? 511 : 255);
+
+	t->rec = DIV_ROUND_UP(
+			(((t->rec_max - t->rec_min) * clk_zero_min_frac) +
+			 (t->rec_min * 100)),
+			100);
+
+	if (t->rec & 0xffffff00) {
+		pr_err("Incorrect rec valuefor clk_zero\n");
+		rc = -EINVAL;
+	} else {
+		t->reg_value = t->rec;
+	}
+
+	pr_debug("CLK_ZERO:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+		 t->reg_value);
+	return rc;
+}
+
+/**
+ * calc_clk_trail - calculates prepare trail params for clk lane.
+ */
+static int calc_clk_trail(struct phy_clk_params *clk_params,
+			  struct phy_timing_desc *desc,
+			  s64 *teot_clk_lane)
+{
+	u64 const multiplier = BIT(20);
+	u32 const phy_timing_frac = 30;
+
+	int rc = 0;
+	struct timing_entry *t = &desc->clk_trail;
+	u64 temp_multiple;
+	s32 frac;
+	s64 mipi_max_tr, rec_temp1, rec_temp2, rec_temp3, mipi_max;
+	s64 teot_clk_lane1;
+
+	temp_multiple = div_s64(
+			(12 * multiplier * clk_params->tlpx_numer_ns),
+			clk_params->bitclk_mbps);
+	div_s64_rem(temp_multiple, multiplier, &frac);
+
+	mipi_max_tr = ((105 * multiplier) +
+		       (temp_multiple + frac));
+	teot_clk_lane1 = div_s64(mipi_max_tr, multiplier);
+
+	mipi_max = (mipi_max_tr - (clk_params->treot_ns * multiplier));
+	t->mipi_max = div_s64(mipi_max, multiplier);
+
+	temp_multiple = div_s64(
+			(t->mipi_min * multiplier * clk_params->bitclk_mbps),
+			clk_params->tlpx_numer_ns);
+
+	div_s64_rem(temp_multiple, multiplier, &frac);
+	rec_temp1 = temp_multiple + frac + (3 * multiplier);
+	rec_temp2 = div_s64(rec_temp1, 8);
+	rec_temp3 = roundup(rec_temp2, multiplier);
+
+	t->rec_min = div_s64(rec_temp3, multiplier);
+
+	/* recommended max */
+	rec_temp1 = div_s64((mipi_max * clk_params->bitclk_mbps),
+			    clk_params->tlpx_numer_ns);
+	rec_temp2 = rec_temp1 + (3 * multiplier);
+	rec_temp3 = rec_temp2 / 8;
+	t->rec_max = div_s64(rec_temp3, multiplier);
+
+	t->rec = DIV_ROUND_UP(
+		(((t->rec_max - t->rec_min) * phy_timing_frac) +
+		 (t->rec_min * 100)),
+		 100);
+
+	if (t->rec & 0xffffff00) {
+		pr_err("Incorrect rec valuefor clk_zero\n");
+		rc = -EINVAL;
+	} else {
+		t->reg_value = t->rec;
+	}
+
+	*teot_clk_lane = teot_clk_lane1;
+	pr_debug("CLK_TRAIL:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+		 t->reg_value);
+	return rc;
+}
+
+/**
+ * calc_hs_prepare - calculates prepare timing params for data lanes in HS.
+ */
+static int calc_hs_prepare(struct phy_clk_params *clk_params,
+			   struct phy_timing_desc *desc,
+			   u64 *temp_mul)
+{
+	u64 const multiplier = BIT(20);
+	u32 const min_prepare_frac = 50;
+	int rc = 0;
+	struct timing_entry *t = &desc->hs_prepare;
+	u64 temp_multiple, dividend, temp;
+	s32 frac;
+	s64 rec_temp1, rec_temp2, mipi_max, mipi_min;
+	u32 low_clk_multiplier = 0;
+
+	if (clk_params->bitclk_mbps <= 120)
+		low_clk_multiplier = 2;
+	/* mipi min */
+	temp_multiple = div_s64((4 * multiplier * clk_params->tlpx_numer_ns),
+				clk_params->bitclk_mbps);
+	div_s64_rem(temp_multiple, multiplier, &frac);
+	mipi_min = (40 * multiplier) + (temp_multiple + frac);
+	t->mipi_min = div_s64(mipi_min, multiplier);
+
+	/* mipi_max */
+	temp_multiple = div_s64(
+			(6 * multiplier * clk_params->tlpx_numer_ns),
+			clk_params->bitclk_mbps);
+	div_s64_rem(temp_multiple, multiplier, &frac);
+	mipi_max = (85 * multiplier) + temp_multiple;
+	t->mipi_max = div_s64(mipi_max, multiplier);
+
+	/* recommended min */
+	temp_multiple = div_s64((mipi_min * clk_params->bitclk_mbps),
+				clk_params->tlpx_numer_ns);
+	temp_multiple -= (low_clk_multiplier * multiplier);
+	div_s64_rem(temp_multiple, multiplier, &frac);
+	rec_temp1 = roundup(((temp_multiple + frac) / 8), multiplier);
+	t->rec_min = div_s64(rec_temp1, multiplier);
+
+	/* recommended max */
+	temp_multiple = div_s64((mipi_max * clk_params->bitclk_mbps),
+				clk_params->tlpx_numer_ns);
+	temp_multiple -= (low_clk_multiplier * multiplier);
+	div_s64_rem(temp_multiple, multiplier, &frac);
+	rec_temp2 = rounddown((temp_multiple / 8), multiplier);
+	t->rec_max = div_s64(rec_temp2, multiplier);
+
+	/* register value */
+	dividend = ((rec_temp2 - rec_temp1) * min_prepare_frac);
+	temp = roundup(div_u64(dividend, 100), multiplier);
+	t->rec = div_s64((temp + rec_temp1), multiplier);
+
+	if (t->rec & 0xffffff00) {
+		pr_err("Incorrect rec valuefor hs_prepare\n");
+		rc = -EINVAL;
+	} else {
+		t->reg_value = t->rec;
+	}
+
+	temp_multiple = div_s64(
+			(8 * (temp + rec_temp1) * clk_params->tlpx_numer_ns),
+			clk_params->bitclk_mbps);
+
+	*temp_mul = temp_multiple;
+	pr_debug("HS_PREP:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+		 t->reg_value);
+	return rc;
+}
+
+/**
+ * calc_hs_zero - calculates zero timing params for data lanes in HS.
+ */
+static int calc_hs_zero(struct phy_clk_params *clk_params,
+			struct phy_timing_desc *desc,
+			u64 temp_multiple)
+{
+	u32 const hs_zero_min_frac = 10;
+	u64 const multiplier = BIT(20);
+	int rc = 0;
+	struct timing_entry *t = &desc->hs_zero;
+	s64 rec_temp1, rec_temp2, rec_temp3, mipi_min;
+	s64 rec_min;
+
+	mipi_min = div_s64((10 * clk_params->tlpx_numer_ns * multiplier),
+			   clk_params->bitclk_mbps);
+	rec_temp1 = (145 * multiplier) + mipi_min - temp_multiple;
+	t->mipi_min = div_s64(rec_temp1, multiplier);
+
+	/* recommended min */
+	rec_temp1 = div_s64((rec_temp1 * clk_params->bitclk_mbps),
+			    clk_params->tlpx_numer_ns);
+	rec_temp2 = rec_temp1 - (11 * multiplier);
+	rec_temp3 = roundup((rec_temp2 / 8), multiplier);
+	rec_min = rec_temp3 - (3 * multiplier);
+	t->rec_min =  div_s64(rec_min, multiplier);
+	t->rec_max = ((t->rec_min > 255) ? 511 : 255);
+
+	t->rec = DIV_ROUND_UP(
+			(((t->rec_max - t->rec_min) * hs_zero_min_frac) +
+			 (t->rec_min * 100)),
+			100);
+
+	if (t->rec & 0xffffff00) {
+		pr_err("Incorrect rec valuefor hs_zero\n");
+		rc = -EINVAL;
+	} else {
+		t->reg_value = t->rec;
+	}
+
+	pr_debug("HS_ZERO:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+		 t->reg_value);
+
+	return rc;
+}
+
+/**
+ * calc_hs_trail - calculates trail timing params for data lanes in HS.
+ */
+static int calc_hs_trail(struct phy_clk_params *clk_params,
+			 struct phy_timing_desc *desc,
+			 u64 teot_clk_lane)
+{
+	u32 const phy_timing_frac = 30;
+	int rc = 0;
+	struct timing_entry *t = &desc->hs_trail;
+	s64 rec_temp1;
+
+	t->mipi_min = 60 +
+			mult_frac(clk_params->tlpx_numer_ns, 4,
+				  clk_params->bitclk_mbps);
+
+	t->mipi_max = teot_clk_lane - clk_params->treot_ns;
+
+	t->rec_min = DIV_ROUND_UP(
+		((t->mipi_min * clk_params->bitclk_mbps) +
+		 (3 * clk_params->tlpx_numer_ns)),
+		(8 * clk_params->tlpx_numer_ns));
+
+	rec_temp1 = ((t->mipi_max * clk_params->bitclk_mbps) +
+		     (3 * clk_params->tlpx_numer_ns));
+	t->rec_max = (rec_temp1 / (8 * clk_params->tlpx_numer_ns));
+	rec_temp1 = DIV_ROUND_UP(
+			((t->rec_max - t->rec_min) * phy_timing_frac),
+			100);
+	t->rec = rec_temp1 + t->rec_min;
+
+	if (t->rec & 0xffffff00) {
+		pr_err("Incorrect rec valuefor hs_trail\n");
+		rc = -EINVAL;
+	} else {
+		t->reg_value = t->rec;
+	}
+
+	pr_debug("HS_TRAIL:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+		 t->reg_value);
+
+	return rc;
+}
+
+/**
+ * calc_hs_rqst - calculates rqst timing params for data lanes in HS.
+ */
+static int calc_hs_rqst(struct phy_clk_params *clk_params,
+			struct phy_timing_desc *desc)
+{
+	int rc = 0;
+	struct timing_entry *t = &desc->hs_rqst;
+
+	t->rec = DIV_ROUND_UP(
+		((t->mipi_min * clk_params->bitclk_mbps) -
+		 (8 * clk_params->tlpx_numer_ns)),
+		(8 * clk_params->tlpx_numer_ns));
+
+	if (t->rec & 0xffffff00) {
+		pr_err("Incorrect rec valuefor hs_rqst, %d\n", t->rec);
+		rc = -EINVAL;
+	} else {
+		t->reg_value = t->rec;
+	}
+
+	pr_debug("HS_RQST:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+		 t->reg_value);
+
+	return rc;
+}
+
+/**
+ * calc_hs_exit - calculates exit timing params for data lanes in HS.
+ */
+static int calc_hs_exit(struct phy_clk_params *clk_params,
+			struct phy_timing_desc *desc)
+{
+	u32 const hs_exit_min_frac = 10;
+	int rc = 0;
+	struct timing_entry *t = &desc->hs_exit;
+
+	t->rec_min = (DIV_ROUND_UP(
+			(t->mipi_min * clk_params->bitclk_mbps),
+			(8 * clk_params->tlpx_numer_ns)) - 1);
+
+	t->rec = DIV_ROUND_UP(
+		(((t->rec_max - t->rec_min) * hs_exit_min_frac) +
+		 (t->rec_min * 100)),
+		100);
+
+	if (t->rec & 0xffffff00) {
+		pr_err("Incorrect rec valuefor hs_exit\n");
+		rc = -EINVAL;
+	} else {
+		t->reg_value = t->rec;
+	}
+
+	pr_debug("HS_EXIT:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+		 t->reg_value);
+
+	return rc;
+}
+
+/**
+ * calc_hs_rqst_clk - calculates rqst timing params for the clock lane.
+ */
+static int calc_hs_rqst_clk(struct phy_clk_params *clk_params,
+			    struct phy_timing_desc *desc)
+{
+	int rc = 0;
+	struct timing_entry *t = &desc->hs_rqst_clk;
+
+	t->rec = DIV_ROUND_UP(
+		((t->mipi_min * clk_params->bitclk_mbps) -
+		 (8 * clk_params->tlpx_numer_ns)),
+		(8 * clk_params->tlpx_numer_ns));
+
+	if (t->rec & 0xffffff00) {
+		pr_err("Incorrect rec valuefor hs_rqst_clk\n");
+		rc = -EINVAL;
+	} else {
+		t->reg_value = t->rec;
+	}
+
+	pr_debug("HS_RQST_CLK:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+		 t->reg_value);
+
+	return rc;
+}
+
+/**
+ * dsi_phy_calc_timing_params - calculates timing parameters for a given bit clock
+ */
+static int dsi_phy_calc_timing_params(struct phy_clk_params *clk_params,
+				      struct phy_timing_desc *desc)
+{
+	int rc = 0;
+	s32 actual_frac = 0;
+	s64 actual_intermediate = 0;
+	u64 temp_multiple;
+	s64 teot_clk_lane;
+
+	rc = calc_clk_prepare(clk_params, desc, &actual_frac,
+			      &actual_intermediate);
+	if (rc) {
+		pr_err("clk_prepare calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_clk_zero(clk_params, desc, actual_frac, actual_intermediate);
+	if (rc) {
+		pr_err("clk_zero calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_clk_trail(clk_params, desc, &teot_clk_lane);
+	if (rc) {
+		pr_err("clk_trail calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_hs_prepare(clk_params, desc, &temp_multiple);
+	if (rc) {
+		pr_err("hs_prepare calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_hs_zero(clk_params, desc, temp_multiple);
+	if (rc) {
+		pr_err("hs_zero calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_hs_trail(clk_params, desc, teot_clk_lane);
+	if (rc) {
+		pr_err("hs_trail calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_hs_rqst(clk_params, desc);
+	if (rc) {
+		pr_err("hs_rqst calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_hs_exit(clk_params, desc);
+	if (rc) {
+		pr_err("hs_exit calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_hs_rqst_clk(clk_params, desc);
+	if (rc) {
+		pr_err("hs_rqst_clk calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+error:
+	return rc;
+}
+
+/**
+ * calculate_timing_params() - calculates timing parameters.
+ * @phy:      Pointer to DSI PHY hardware object.
+ * @mode:     Mode information for which timing has to be calculated.
+ * @config:   DSI host configuration for this mode.
+ * @timing:   Timing parameters for each lane which will be returned.
+ */
+int dsi_phy_hw_v4_0_calculate_timing_params(struct dsi_phy_hw *phy,
+					    struct dsi_mode_info *mode,
+					    struct dsi_host_common_cfg *host,
+					   struct dsi_phy_per_lane_cfgs *timing)
+{
+	/* constants */
+	u32 const esc_clk_mhz = 192; /* TODO: esc clock is hardcoded */
+	u32 const esc_clk_mmss_cc_prediv = 10;
+	u32 const tlpx_numer = 1000;
+	u32 const tr_eot = 20;
+	u32 const clk_prepare_spec_min = 38;
+	u32 const clk_prepare_spec_max = 95;
+	u32 const clk_trail_spec_min = 60;
+	u32 const hs_exit_spec_min = 100;
+	u32 const hs_exit_reco_max = 255;
+	u32 const hs_rqst_spec_min = 50;
+
+	/* local vars */
+	int rc = 0;
+	int i;
+	u32 h_total, v_total;
+	u64 inter_num;
+	u32 num_of_lanes = 0;
+	u32 bpp;
+	u64 x, y;
+	struct phy_timing_desc desc;
+	struct phy_clk_params clk_params = {0};
+
+	memset(&desc, 0x0, sizeof(desc));
+	h_total = DSI_H_TOTAL(mode);
+	v_total = DSI_V_TOTAL(mode);
+
+	bpp = bits_per_pixel[host->dst_format];
+
+	inter_num = bpp * mode->refresh_rate;
+
+	if (host->data_lanes & DSI_DATA_LANE_0)
+		num_of_lanes++;
+	if (host->data_lanes & DSI_DATA_LANE_1)
+		num_of_lanes++;
+	if (host->data_lanes & DSI_DATA_LANE_2)
+		num_of_lanes++;
+	if (host->data_lanes & DSI_DATA_LANE_3)
+		num_of_lanes++;
+
+	x = mult_frac(v_total * h_total, inter_num, num_of_lanes);
+	y = rounddown(x, 1);
+
+	clk_params.bitclk_mbps = rounddown(mult_frac(y, 1, 1000000), 1);
+	clk_params.escclk_numer = esc_clk_mhz;
+	clk_params.escclk_denom = esc_clk_mmss_cc_prediv;
+	clk_params.tlpx_numer_ns = tlpx_numer;
+	clk_params.treot_ns = tr_eot;
+
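+	/*
+	 * Worked example (illustrative): a 1440x2560 60 Hz mode at 24 bpp
+	 * over four lanes, with totals of 1500x2600, gives
+	 * (1500 * 2600 * 60 * 24) / 4 bits per second per lane, i.e. a bit
+	 * clock of roughly 1404 Mbps.
+	 */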
+
+	/* Setup default parameters */
+	desc.clk_prepare.mipi_min = clk_prepare_spec_min;
+	desc.clk_prepare.mipi_max = clk_prepare_spec_max;
+	desc.clk_trail.mipi_min = clk_trail_spec_min;
+	desc.hs_exit.mipi_min = hs_exit_spec_min;
+	desc.hs_exit.rec_max = hs_exit_reco_max;
+
+	desc.clk_prepare.rec_min = DIV_ROUND_UP(
+			(desc.clk_prepare.mipi_min * clk_params.bitclk_mbps),
+			(8 * clk_params.tlpx_numer_ns)
+			);
+
+	desc.clk_prepare.rec_max = rounddown(
+		mult_frac((desc.clk_prepare.mipi_max * clk_params.bitclk_mbps),
+			  1, (8 * clk_params.tlpx_numer_ns)),
+		1);
+
+	desc.hs_rqst.mipi_min = hs_rqst_spec_min;
+	desc.hs_rqst_clk.mipi_min = hs_rqst_spec_min;
+
+	pr_debug("BIT CLOCK = %d, tlpx_numer_ns=%d, treot_ns=%d\n",
+	       clk_params.bitclk_mbps, clk_params.tlpx_numer_ns,
+	       clk_params.treot_ns);
+	rc = dsi_phy_calc_timing_params(&clk_params, &desc);
+	if (rc) {
+		pr_err("Timing calc failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
+		timing->lane[i][0] = desc.hs_exit.reg_value;
+
+		if (i == DSI_LOGICAL_CLOCK_LANE)
+			timing->lane[i][1] = desc.clk_zero.reg_value;
+		else
+			timing->lane[i][1] = desc.hs_zero.reg_value;
+
+		if (i == DSI_LOGICAL_CLOCK_LANE)
+			timing->lane[i][2] = desc.clk_prepare.reg_value;
+		else
+			timing->lane[i][2] = desc.hs_prepare.reg_value;
+
+		if (i == DSI_LOGICAL_CLOCK_LANE)
+			timing->lane[i][3] = desc.clk_trail.reg_value;
+		else
+			timing->lane[i][3] = desc.hs_trail.reg_value;
+
+		if (i == DSI_LOGICAL_CLOCK_LANE)
+			timing->lane[i][4] = desc.hs_rqst_clk.reg_value;
+		else
+			timing->lane[i][4] = desc.hs_rqst.reg_value;
+
+		timing->lane[i][5] = 0x3;
+		timing->lane[i][6] = 0x4;
+		timing->lane[i][7] = 0xA0;
+		pr_debug("[%d][%d %d %d %d %d]\n", i, timing->lane[i][0],
+						    timing->lane[i][1],
+						    timing->lane[i][2],
+						    timing->lane[i][3],
+						    timing->lane[i][4]);
+	}
+	timing->count_per_lane = 8;
+
+error:
+	return rc;
+}
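+
+/*
+ * Usage sketch (illustrative, assuming populated phy, mode and host
+ * objects):
+ *
+ *	struct dsi_phy_per_lane_cfgs timing;
+ *	int rc;
+ *
+ *	rc = dsi_phy_hw_v4_0_calculate_timing_params(phy, &mode, &host,
+ *						     &timing);
+ *	if (!rc)
+ *		cfg.timing = timing;
+ */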
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 0da9862..70e9fd5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -142,9 +142,9 @@ static int sun4i_drv_bind(struct device *dev)
 
 	/* Create our layers */
 	drv->layers = sun4i_layers_init(drm);
-	if (!drv->layers) {
+	if (IS_ERR(drv->layers)) {
 		dev_err(drm->dev, "Couldn't create the planes\n");
-		ret = -EINVAL;
+		ret = PTR_ERR(drv->layers);
 		goto free_drm;
 	}
 
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index c3ff10f..d198ad7 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -152,15 +152,13 @@ static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
 
 	DRM_DEBUG_DRIVER("Enabling RGB output\n");
 
-	if (!IS_ERR(tcon->panel)) {
+	if (!IS_ERR(tcon->panel))
 		drm_panel_prepare(tcon->panel);
-		drm_panel_enable(tcon->panel);
-	}
-
-	/* encoder->bridge can be NULL; drm_bridge_enable checks for it */
-	drm_bridge_enable(encoder->bridge);
 
 	sun4i_tcon_channel_enable(tcon, 0);
+
+	if (!IS_ERR(tcon->panel))
+		drm_panel_enable(tcon->panel);
 }
 
 static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
@@ -171,15 +169,13 @@ static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
 
 	DRM_DEBUG_DRIVER("Disabling RGB output\n");
 
+	if (!IS_ERR(tcon->panel))
+		drm_panel_disable(tcon->panel);
+
 	sun4i_tcon_channel_disable(tcon, 0);
 
-	/* encoder->bridge can be NULL; drm_bridge_disable checks for it */
-	drm_bridge_disable(encoder->bridge);
-
-	if (!IS_ERR(tcon->panel)) {
-		drm_panel_disable(tcon->panel);
+	if (!IS_ERR(tcon->panel))
 		drm_panel_unprepare(tcon->panel);
-	}
 }
 
 static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder,
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index d223650..11edabf 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -59,7 +59,6 @@
 
 config I2C_MUX
 	tristate "I2C bus multiplexing support"
-	depends on HAS_IOMEM
 	help
 	  Say Y here if you want the I2C core to support the ability to
 	  handle multiplexed I2C bus topologies, by presenting each
diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c
index 49f2084..50813a2 100644
--- a/drivers/i2c/busses/i2c-digicolor.c
+++ b/drivers/i2c/busses/i2c-digicolor.c
@@ -347,7 +347,7 @@ static int dc_i2c_probe(struct platform_device *pdev)
 
 	ret = i2c_add_adapter(&i2c->adap);
 	if (ret < 0) {
-		clk_unprepare(i2c->clk);
+		clk_disable_unprepare(i2c->clk);
 		return ret;
 	}
 
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index e280c8e..96de9ce 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -63,6 +63,7 @@
 
 config I2C_MUX_REG
 	tristate "Register-based I2C multiplexer"
+	depends on HAS_IOMEM
 	help
 	  If you say yes to this option, support will be included for a
 	  register based I2C multiplexer. This driver provides access to
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index b3893f6..3e6fe17 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -69,10 +69,28 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
 		goto err_with_revert;
 	}
 
-	p = devm_pinctrl_get_select(adap->dev.parent, priv->bus_name);
+	/*
+	 * Check if there are pinctrl states at all. Note: we can't use
+	 * devm_pinctrl_get_select() because we need to distinguish between
+	 * the -ENODEV from devm_pinctrl_get() and pinctrl_lookup_state().
+	 */
+	p = devm_pinctrl_get(adap->dev.parent);
 	if (IS_ERR(p)) {
 		ret = PTR_ERR(p);
-		goto err_with_put;
+		/* continue if just no pinctrl states (e.g. i2c-gpio), otherwise exit */
+		if (ret != -ENODEV)
+			goto err_with_put;
+	} else {
+		/* there are states. check and use them */
+		struct pinctrl_state *s = pinctrl_lookup_state(p, priv->bus_name);
+
+		if (IS_ERR(s)) {
+			ret = PTR_ERR(s);
+			goto err_with_put;
+		}
+		ret = pinctrl_select_state(p, s);
+		if (ret < 0)
+			goto err_with_put;
 	}
 
 	priv->chan[new_chan].parent_adap = adap;
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 1091346..8bc3d36 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -268,9 +268,9 @@ static int pca954x_probe(struct i2c_client *client,
 				/* discard unconfigured channels */
 				break;
 			idle_disconnect_pd = pdata->modes[num].deselect_on_exit;
-			data->deselect |= (idle_disconnect_pd
-					   || idle_disconnect_dt) << num;
 		}
+		data->deselect |= (idle_disconnect_pd ||
+				   idle_disconnect_dt) << num;
 
 		ret = i2c_mux_add_adapter(muxc, force, num, class);
 
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index b136d3a..0f58f46 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -699,13 +699,16 @@ EXPORT_SYMBOL(rdma_addr_cancel);
 struct resolve_cb_context {
 	struct rdma_dev_addr *addr;
 	struct completion comp;
+	int status;
 };
 
 static void resolve_cb(int status, struct sockaddr *src_addr,
 	     struct rdma_dev_addr *addr, void *context)
 {
-	memcpy(((struct resolve_cb_context *)context)->addr, addr, sizeof(struct
-				rdma_dev_addr));
+	if (!status)
+		memcpy(((struct resolve_cb_context *)context)->addr,
+		       addr, sizeof(struct rdma_dev_addr));
+	((struct resolve_cb_context *)context)->status = status;
 	complete(&((struct resolve_cb_context *)context)->comp);
 }
 
@@ -743,6 +746,10 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
 
 	wait_for_completion(&ctx.comp);
 
+	ret = ctx.status;
+	if (ret)
+		return ret;
+
 	memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
 	dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if);
 	if (!dev)
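
The addr.c change is a small instance of a general asynchronous rule: a
callback must record its status in the shared context before signalling
completion, and the waiter must check that status before consuming the
output buffer. A minimal userspace sketch, with a plain int standing in
for struct completion (names are illustrative):

#include <stdio.h>

struct cb_ctx {
	int status;	/* filled in by the callback */
	int done;	/* stand-in for struct completion */
};

static void on_resolve(int status, struct cb_ctx *ctx)
{
	ctx->status = status;	/* propagate even on failure */
	ctx->done = 1;		/* complete() in the kernel version */
}

int main(void)
{
	struct cb_ctx ctx = { 0, 0 };

	on_resolve(-19, &ctx);	/* simulate a failed resolve (-ENODEV) */
	while (!ctx.done)
		;		/* wait_for_completion() stand-in */
	if (ctx.status) {
		printf("resolve failed: %d\n", ctx.status);
		return 1;	/* bail before reading the result */
	}
	return 0;
}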
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index c995255..71c7c4c 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -80,6 +80,8 @@ static struct ib_cm {
 	__be32 random_id_operand;
 	struct list_head timewait_list;
 	struct workqueue_struct *wq;
+	/* Sync on cm change port state */
+	spinlock_t state_lock;
 } cm;
 
 /* Counter indexes ordered by attribute ID */
@@ -161,6 +163,8 @@ struct cm_port {
 	struct ib_mad_agent *mad_agent;
 	struct kobject port_obj;
 	u8 port_num;
+	struct list_head cm_priv_prim_list;
+	struct list_head cm_priv_altr_list;
 	struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
 };
 
@@ -241,6 +245,12 @@ struct cm_id_private {
 	u8 service_timeout;
 	u8 target_ack_delay;
 
+	struct list_head prim_list;
+	struct list_head altr_list;
+	/* Indicates that the send port mad is registered and av is set */
+	int prim_send_port_not_ready;
+	int altr_send_port_not_ready;
+
 	struct list_head work_list;
 	atomic_t work_count;
 };
@@ -259,20 +269,47 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
 	struct ib_mad_agent *mad_agent;
 	struct ib_mad_send_buf *m;
 	struct ib_ah *ah;
+	struct cm_av *av;
+	unsigned long flags, flags2;
+	int ret = 0;
 
+	/* don't let the port be released until the agent is down */
+	spin_lock_irqsave(&cm.state_lock, flags2);
+	spin_lock_irqsave(&cm.lock, flags);
+	if (!cm_id_priv->prim_send_port_not_ready)
+		av = &cm_id_priv->av;
+	else if (!cm_id_priv->altr_send_port_not_ready &&
+		 (cm_id_priv->alt_av.port))
+		av = &cm_id_priv->alt_av;
+	else {
+		pr_info("%s: not valid CM id\n", __func__);
+		ret = -ENODEV;
+		spin_unlock_irqrestore(&cm.lock, flags);
+		goto out;
+	}
+	spin_unlock_irqrestore(&cm.lock, flags);
+	/* Make sure the port hasn't released the MAD agent yet */
 	mad_agent = cm_id_priv->av.port->mad_agent;
-	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
-	if (IS_ERR(ah))
-		return PTR_ERR(ah);
+	if (!mad_agent) {
+		pr_info("%s: not a valid MAD agent\n", __func__);
+		ret = -ENODEV;
+		goto out;
+	}
+	ah = ib_create_ah(mad_agent->qp->pd, &av->ah_attr);
+	if (IS_ERR(ah)) {
+		ret = PTR_ERR(ah);
+		goto out;
+	}
 
 	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
-			       cm_id_priv->av.pkey_index,
+			       av->pkey_index,
 			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 			       GFP_ATOMIC,
 			       IB_MGMT_BASE_VERSION);
 	if (IS_ERR(m)) {
 		ib_destroy_ah(ah);
-		return PTR_ERR(m);
+		ret = PTR_ERR(m);
+		goto out;
 	}
 
 	/* Timeout set by caller if response is expected. */
@@ -282,7 +319,10 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
 	atomic_inc(&cm_id_priv->refcount);
 	m->context[0] = cm_id_priv;
 	*msg = m;
-	return 0;
+
+out:
+	spin_unlock_irqrestore(&cm.state_lock, flags2);
+	return ret;
 }
 
 static int cm_alloc_response_msg(struct cm_port *port,
@@ -352,7 +392,8 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
 			   grh, &av->ah_attr);
 }
 
-static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
+static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av,
+			      struct cm_id_private *cm_id_priv)
 {
 	struct cm_device *cm_dev;
 	struct cm_port *port = NULL;
@@ -387,7 +428,17 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
 			     &av->ah_attr);
 	av->timeout = path->packet_life_time + 1;
 
-	return 0;
+	spin_lock_irqsave(&cm.lock, flags);
+	if (&cm_id_priv->av == av)
+		list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
+	else if (&cm_id_priv->alt_av == av)
+		list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
+	else
+		ret = -EINVAL;
+
+	spin_unlock_irqrestore(&cm.lock, flags);
+
+	return ret;
 }
 
 static int cm_alloc_id(struct cm_id_private *cm_id_priv)
@@ -677,6 +728,8 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
 	spin_lock_init(&cm_id_priv->lock);
 	init_completion(&cm_id_priv->comp);
 	INIT_LIST_HEAD(&cm_id_priv->work_list);
+	INIT_LIST_HEAD(&cm_id_priv->prim_list);
+	INIT_LIST_HEAD(&cm_id_priv->altr_list);
 	atomic_set(&cm_id_priv->work_count, -1);
 	atomic_set(&cm_id_priv->refcount, 1);
 	return &cm_id_priv->id;
@@ -892,6 +945,15 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
 		break;
 	}
 
+	spin_lock_irq(&cm.lock);
+	if (!list_empty(&cm_id_priv->altr_list) &&
+	    (!cm_id_priv->altr_send_port_not_ready))
+		list_del(&cm_id_priv->altr_list);
+	if (!list_empty(&cm_id_priv->prim_list) &&
+	    (!cm_id_priv->prim_send_port_not_ready))
+		list_del(&cm_id_priv->prim_list);
+	spin_unlock_irq(&cm.lock);
+
 	cm_free_id(cm_id->local_id);
 	cm_deref_id(cm_id_priv);
 	wait_for_completion(&cm_id_priv->comp);
@@ -1192,12 +1254,13 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 		goto out;
 	}
 
-	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
+	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
+				 cm_id_priv);
 	if (ret)
 		goto error1;
 	if (param->alternate_path) {
 		ret = cm_init_av_by_path(param->alternate_path,
-					 &cm_id_priv->alt_av);
+					 &cm_id_priv->alt_av, cm_id_priv);
 		if (ret)
 			goto error1;
 	}
@@ -1653,7 +1716,8 @@ static int cm_req_handler(struct cm_work *work)
 			dev_put(gid_attr.ndev);
 		}
 		work->path[0].gid_type = gid_attr.gid_type;
-		ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
+		ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
+					 cm_id_priv);
 	}
 	if (ret) {
 		int err = ib_get_cached_gid(work->port->cm_dev->ib_device,
@@ -1672,7 +1736,8 @@ static int cm_req_handler(struct cm_work *work)
 		goto rejected;
 	}
 	if (req_msg->alt_local_lid) {
-		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
+		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
+					 cm_id_priv);
 		if (ret) {
 			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
 				       &work->path[0].sgid,
@@ -2727,7 +2792,8 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
 		goto out;
 	}
 
-	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
+	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av,
+				 cm_id_priv);
 	if (ret)
 		goto out;
 	cm_id_priv->alt_av.timeout =
@@ -2839,7 +2905,8 @@ static int cm_lap_handler(struct cm_work *work)
 	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
 				work->mad_recv_wc->recv_buf.grh,
 				&cm_id_priv->av);
-	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
+	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
+			   cm_id_priv);
 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
 	if (!ret)
 		list_add_tail(&work->list, &cm_id_priv->work_list);
@@ -3031,7 +3098,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
 		return -EINVAL;
 
 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
-	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
+	ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv);
 	if (ret)
 		goto out;
 
@@ -3468,7 +3535,9 @@ static int cm_establish(struct ib_cm_id *cm_id)
 static int cm_migrate(struct ib_cm_id *cm_id)
 {
 	struct cm_id_private *cm_id_priv;
+	struct cm_av tmp_av;
 	unsigned long flags;
+	int tmp_send_port_not_ready;
 	int ret = 0;
 
 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
@@ -3477,7 +3546,14 @@ static int cm_migrate(struct ib_cm_id *cm_id)
 	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
 	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
 		cm_id->lap_state = IB_CM_LAP_IDLE;
+		/* Swap address vector */
+		tmp_av = cm_id_priv->av;
 		cm_id_priv->av = cm_id_priv->alt_av;
+		cm_id_priv->alt_av = tmp_av;
+		/* Swap port send ready state */
+		tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
+		cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
+		cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
 	} else
 		ret = -EINVAL;
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -3888,6 +3964,9 @@ static void cm_add_one(struct ib_device *ib_device)
 		port->cm_dev = cm_dev;
 		port->port_num = i;
 
+		INIT_LIST_HEAD(&port->cm_priv_prim_list);
+		INIT_LIST_HEAD(&port->cm_priv_altr_list);
+
 		ret = cm_create_port_fs(port);
 		if (ret)
 			goto error1;
@@ -3945,6 +4024,8 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
 {
 	struct cm_device *cm_dev = client_data;
 	struct cm_port *port;
+	struct cm_id_private *cm_id_priv;
+	struct ib_mad_agent *cur_mad_agent;
 	struct ib_port_modify port_modify = {
 		.clr_port_cap_mask = IB_PORT_CM_SUP
 	};
@@ -3968,15 +4049,27 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
 
 		port = cm_dev->port[i-1];
 		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
+		/* Mark all the cm_id's as not valid */
+		spin_lock_irq(&cm.lock);
+		list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
+			cm_id_priv->altr_send_port_not_ready = 1;
+		list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
+			cm_id_priv->prim_send_port_not_ready = 1;
+		spin_unlock_irq(&cm.lock);
 		/*
 		 * We flush the queue here after the going_down set, this
 		 * verify that no new works will be queued in the recv handler,
 		 * after that we can call the unregister_mad_agent
 		 */
 		flush_workqueue(cm.wq);
-		ib_unregister_mad_agent(port->mad_agent);
+		spin_lock_irq(&cm.state_lock);
+		cur_mad_agent = port->mad_agent;
+		port->mad_agent = NULL;
+		spin_unlock_irq(&cm.state_lock);
+		ib_unregister_mad_agent(cur_mad_agent);
 		cm_remove_port_fs(port);
 	}
+
 	device_unregister(cm_dev->device);
 	kfree(cm_dev);
 }
@@ -3989,6 +4082,7 @@ static int __init ib_cm_init(void)
 	INIT_LIST_HEAD(&cm.device_list);
 	rwlock_init(&cm.device_lock);
 	spin_lock_init(&cm.lock);
+	spin_lock_init(&cm.state_lock);
 	cm.listen_service_table = RB_ROOT;
 	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
 	cm.remote_id_table = RB_ROOT;
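
The cm.c changes combine two pieces: each port keeps lists of the
cm_id's using it for their primary and alternate paths, and
cm_remove_one() walks those lists under cm.lock to flag every user
before the MAD agent is unregistered, so later sends fail with -ENODEV
instead of dereferencing a freed agent. A compact sketch of the marking
side, assuming a plain singly-linked list in place of list_head and no
locking:

#include <stdio.h>

struct user {
	int port_not_ready;	/* checked by the send path */
	struct user *next;
};

static void mark_users_stale(struct user *head)
{
	struct user *u;

	for (u = head; u; u = u->next)
		u->port_not_ready = 1;	/* sends now return -ENODEV */
}

int main(void)
{
	struct user b = { 0, NULL }, a = { 0, &b };

	mark_users_stale(&a);
	printf("%d %d\n", a.port_not_ready, b.port_not_ready);
	return 0;
}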
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 89a6b05..2a6fc47 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2438,6 +2438,18 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos)
 	return 0;
 }
 
+static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
+					   unsigned long supported_gids,
+					   enum ib_gid_type default_gid)
+{
+	if ((network_type == RDMA_NETWORK_IPV4 ||
+	     network_type == RDMA_NETWORK_IPV6) &&
+	    test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
+		return IB_GID_TYPE_ROCE_UDP_ENCAP;
+
+	return default_gid;
+}
+
 static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 {
 	struct rdma_route *route = &id_priv->id.route;
@@ -2463,6 +2475,8 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 	route->num_paths = 1;
 
 	if (addr->dev_addr.bound_dev_if) {
+		unsigned long supported_gids;
+
 		ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
 		if (!ndev) {
 			ret = -ENODEV;
@@ -2486,7 +2500,12 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 
 		route->path_rec->net = &init_net;
 		route->path_rec->ifindex = ndev->ifindex;
-		route->path_rec->gid_type = id_priv->gid_type;
+		supported_gids = roce_gid_type_mask_support(id_priv->id.device,
+							    id_priv->id.port_num);
+		route->path_rec->gid_type =
+			cma_route_gid_type(addr->dev_addr.network,
+					   supported_gids,
+					   id_priv->gid_type);
 	}
 	if (!ndev) {
 		ret = -ENODEV;
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 224ad27..84b4eff 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -175,7 +175,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 
 	cur_base = addr & PAGE_MASK;
 
-	if (npages == 0) {
+	if (npages == 0 || npages > UINT_MAX) {
 		ret = -EINVAL;
 		goto out;
 	}
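
The umem.c guard rejects page counts that a later layer would silently
truncate. The same check in isolation, as a hedged userspace sketch
(-EINVAL is written out as -22 so it compiles outside the kernel; on
32-bit, where unsigned long and unsigned int have the same width, the
upper bound can never trigger):

#include <limits.h>
#include <stdio.h>

static int check_npages(unsigned long npages)
{
	/* reject zero and anything a u32 field cannot hold */
	if (npages == 0 || npages > UINT_MAX)
		return -22;
	return 0;
}

int main(void)
{
	printf("%d\n", check_npages(0));	/* rejected */
	printf("%d\n", check_npages(4096));	/* accepted */
	return 0;
}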
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 0012fa5..44b1104 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -262,12 +262,9 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
 			container_of(uobj, struct ib_uqp_object, uevent.uobject);
 
 		idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
-		if (qp != qp->real_qp) {
-			ib_close_qp(qp);
-		} else {
+		if (qp == qp->real_qp)
 			ib_uverbs_detach_umcast(qp, uqp);
-			ib_destroy_qp(qp);
-		}
+		ib_destroy_qp(qp);
 		ib_uverbs_release_uevent(file, &uqp->uevent);
 		kfree(uqp);
 	}
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 867b8cf..19c6477 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -666,18 +666,6 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
 	return ret;
 }
 
-static void invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
-{
-	struct c4iw_mr *mhp;
-	unsigned long flags;
-
-	spin_lock_irqsave(&rhp->lock, flags);
-	mhp = get_mhp(rhp, rkey >> 8);
-	if (mhp)
-		mhp->attr.state = 0;
-	spin_unlock_irqrestore(&rhp->lock, flags);
-}
-
 /*
  * Get one cq entry from c4iw and map it to openib.
  *
@@ -733,7 +721,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
 		    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
 			wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
 			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
-			invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey);
+			c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey);
 		}
 	} else {
 		switch (CQE_OPCODE(&cqe)) {
@@ -762,7 +750,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
 
 			/* Invalidate the MR if the fastreg failed */
 			if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS)
-				invalidate_mr(qhp->rhp, CQE_WRID_FR_STAG(&cqe));
+				c4iw_invalidate_mr(qhp->rhp,
+						   CQE_WRID_FR_STAG(&cqe));
 			break;
 		default:
 			printk(KERN_ERR MOD "Unexpected opcode %d "
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 7e7f79e..4788e1a 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -999,6 +999,6 @@ extern int db_coalescing_threshold;
 extern int use_dsgl;
 void c4iw_drain_rq(struct ib_qp *qp);
 void c4iw_drain_sq(struct ib_qp *qp);
-
+void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
 
 #endif
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 80e2774..410408f 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -770,3 +770,15 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
 	kfree(mhp);
 	return 0;
 }
+
+void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
+{
+	struct c4iw_mr *mhp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rhp->lock, flags);
+	mhp = get_mhp(rhp, rkey >> 8);
+	if (mhp)
+		mhp->attr.state = 0;
+	spin_unlock_irqrestore(&rhp->lock, flags);
+}
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index f57deba..b7ac97b 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -706,12 +706,8 @@ static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
 	return 0;
 }
 
-static int build_inv_stag(struct c4iw_dev *dev, union t4_wr *wqe,
-			  struct ib_send_wr *wr, u8 *len16)
+static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
 {
-	struct c4iw_mr *mhp = get_mhp(dev, wr->ex.invalidate_rkey >> 8);
-
-	mhp->attr.state = 0;
 	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
 	wqe->inv.r2 = 0;
 	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
@@ -797,11 +793,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	spin_lock_irqsave(&qhp->lock, flag);
 	if (t4_wq_in_error(&qhp->wq)) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
+		*bad_wr = wr;
 		return -EINVAL;
 	}
 	num_wrs = t4_sq_avail(&qhp->wq);
 	if (num_wrs == 0) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
+		*bad_wr = wr;
 		return -ENOMEM;
 	}
 	while (wr) {
@@ -840,10 +838,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		case IB_WR_RDMA_READ_WITH_INV:
 			fw_opcode = FW_RI_RDMA_READ_WR;
 			swsqe->opcode = FW_RI_READ_REQ;
-			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
+			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) {
+				c4iw_invalidate_mr(qhp->rhp,
+						   wr->sg_list[0].lkey);
 				fw_flags = FW_RI_RDMA_READ_INVALIDATE;
-			else
+			} else {
 				fw_flags = 0;
+			}
 			err = build_rdma_read(wqe, wr, &len16);
 			if (err)
 				break;
@@ -876,7 +877,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
 			fw_opcode = FW_RI_INV_LSTAG_WR;
 			swsqe->opcode = FW_RI_LOCAL_INV;
-			err = build_inv_stag(qhp->rhp, wqe, wr, &len16);
+			err = build_inv_stag(wqe, wr, &len16);
+			c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey);
 			break;
 		default:
 			PDBG("%s post of type=%d TBD!\n", __func__,
@@ -934,11 +936,13 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 	spin_lock_irqsave(&qhp->lock, flag);
 	if (t4_wq_in_error(&qhp->wq)) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
+		*bad_wr = wr;
 		return -EINVAL;
 	}
 	num_wrs = t4_rq_avail(&qhp->wq);
 	if (num_wrs == 0) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
+		*bad_wr = wr;
 		return -ENOMEM;
 	}
 	while (wr) {
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index a26a9a0..67ea85a 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -775,75 +775,3 @@ void hfi1_put_proc_affinity(int cpu)
 	}
 	mutex_unlock(&affinity->lock);
 }
-
-int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
-			   size_t count)
-{
-	struct hfi1_affinity_node *entry;
-	cpumask_var_t mask;
-	int ret, i;
-
-	mutex_lock(&node_affinity.lock);
-	entry = node_affinity_lookup(dd->node);
-
-	if (!entry) {
-		ret = -EINVAL;
-		goto unlock;
-	}
-
-	ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
-	if (!ret) {
-		ret = -ENOMEM;
-		goto unlock;
-	}
-
-	ret = cpulist_parse(buf, mask);
-	if (ret)
-		goto out;
-
-	if (!cpumask_subset(mask, cpu_online_mask) || cpumask_empty(mask)) {
-		dd_dev_warn(dd, "Invalid CPU mask\n");
-		ret = -EINVAL;
-		goto out;
-	}
-
-	/* reset the SDMA interrupt affinity details */
-	init_cpu_mask_set(&entry->def_intr);
-	cpumask_copy(&entry->def_intr.mask, mask);
-
-	/* Reassign the affinity for each SDMA interrupt. */
-	for (i = 0; i < dd->num_msix_entries; i++) {
-		struct hfi1_msix_entry *msix;
-
-		msix = &dd->msix_entries[i];
-		if (msix->type != IRQ_SDMA)
-			continue;
-
-		ret = get_irq_affinity(dd, msix);
-
-		if (ret)
-			break;
-	}
-out:
-	free_cpumask_var(mask);
-unlock:
-	mutex_unlock(&node_affinity.lock);
-	return ret ? ret : strnlen(buf, PAGE_SIZE);
-}
-
-int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf)
-{
-	struct hfi1_affinity_node *entry;
-
-	mutex_lock(&node_affinity.lock);
-	entry = node_affinity_lookup(dd->node);
-
-	if (!entry) {
-		mutex_unlock(&node_affinity.lock);
-		return -EINVAL;
-	}
-
-	cpumap_print_to_pagebuf(true, buf, &entry->def_intr.mask);
-	mutex_unlock(&node_affinity.lock);
-	return strnlen(buf, PAGE_SIZE);
-}
diff --git a/drivers/infiniband/hw/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h
index b89ea3c..42e6331 100644
--- a/drivers/infiniband/hw/hfi1/affinity.h
+++ b/drivers/infiniband/hw/hfi1/affinity.h
@@ -102,10 +102,6 @@ int hfi1_get_proc_affinity(int);
 /* Release a CPU used by a user process. */
 void hfi1_put_proc_affinity(int);
 
-int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf);
-int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
-			   size_t count);
-
 struct hfi1_affinity_node {
 	int node;
 	struct cpu_mask_set def_intr;
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 9bf5f23..24d0820 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -6301,19 +6301,8 @@ void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
 	/* leave shared count at zero for both global and VL15 */
 	write_global_credit(dd, vau, vl15buf, 0);
 
-	/* We may need some credits for another VL when sending packets
-	 * with the snoop interface. Dividing it down the middle for VL15
-	 * and VL0 should suffice.
-	 */
-	if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
-		write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
-		    << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
-		write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
-		    << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
-	} else {
-		write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
-			<< SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
-	}
+	write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
+		  << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
 }
 
 /*
@@ -9915,9 +9904,6 @@ static void set_lidlmc(struct hfi1_pportdata *ppd)
 	u32 mask = ~((1U << ppd->lmc) - 1);
 	u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
 
-	if (dd->hfi1_snoop.mode_flag)
-		dd_dev_info(dd, "Set lid/lmc while snooping");
-
 	c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
 		| DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
 	c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
@@ -12112,7 +12098,7 @@ static void update_synth_timer(unsigned long opaque)
 	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
 }
 
-#define C_MAX_NAME 13 /* 12 chars + one for /0 */
+#define C_MAX_NAME 16 /* 15 chars + one for \0 */
 static int init_cntrs(struct hfi1_devdata *dd)
 {
 	int i, rcv_ctxts, j;
@@ -14463,7 +14449,7 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
 	 * Any error printing is already done by the init code.
 	 * On return, we have the chip mapped.
 	 */
-	ret = hfi1_pcie_ddinit(dd, pdev, ent);
+	ret = hfi1_pcie_ddinit(dd, pdev);
 	if (ret < 0)
 		goto bail_free;
 
@@ -14691,6 +14677,11 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
 	if (ret)
 		goto bail_free_cntrs;
 
+	init_completion(&dd->user_comp);
+
+	/* The user refcount starts with one to indicate an active device */
+	atomic_set(&dd->user_refcount, 1);
+
 	goto bail;
 
 bail_free_rcverr:
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 9234525..043fd21 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -320,6 +320,9 @@
 /* DC_DC8051_CFG_MODE.GENERAL bits */
 #define DISABLE_SELF_GUID_CHECK 0x2
 
+/* Bad L2 frame error code */
+#define BAD_L2_ERR      0x6
+
 /*
  * Eager buffer minimum and maximum sizes supported by the hardware.
  * All power-of-two sizes in between are supported as well.
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 6563e4d..c5efff2 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -599,7 +599,6 @@ static void __prescan_rxq(struct hfi1_packet *packet)
 					 dd->rhf_offset;
 		struct rvt_qp *qp;
 		struct ib_header *hdr;
-		struct ib_other_headers *ohdr;
 		struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
 		u64 rhf = rhf_to_cpu(rhf_addr);
 		u32 etype = rhf_rcv_type(rhf), qpn, bth1;
@@ -615,18 +614,21 @@ static void __prescan_rxq(struct hfi1_packet *packet)
 		if (etype != RHF_RCV_TYPE_IB)
 			goto next;
 
-		hdr = hfi1_get_msgheader(dd, rhf_addr);
+		packet->hdr = hfi1_get_msgheader(dd, rhf_addr);
+		hdr = packet->hdr;
 
 		lnh = be16_to_cpu(hdr->lrh[0]) & 3;
 
-		if (lnh == HFI1_LRH_BTH)
-			ohdr = &hdr->u.oth;
-		else if (lnh == HFI1_LRH_GRH)
-			ohdr = &hdr->u.l.oth;
-		else
+		if (lnh == HFI1_LRH_BTH) {
+			packet->ohdr = &hdr->u.oth;
+		} else if (lnh == HFI1_LRH_GRH) {
+			packet->ohdr = &hdr->u.l.oth;
+			packet->rcv_flags |= HFI1_HAS_GRH;
+		} else {
 			goto next; /* just in case */
+		}
 
-		bth1 = be32_to_cpu(ohdr->bth[1]);
+		bth1 = be32_to_cpu(packet->ohdr->bth[1]);
 		is_ecn = !!(bth1 & (HFI1_FECN_SMASK | HFI1_BECN_SMASK));
 
 		if (!is_ecn)
@@ -646,7 +648,7 @@ static void __prescan_rxq(struct hfi1_packet *packet)
 
 		/* turn off BECN, FECN */
 		bth1 &= ~(HFI1_FECN_SMASK | HFI1_BECN_SMASK);
-		ohdr->bth[1] = cpu_to_be32(bth1);
+		packet->ohdr->bth[1] = cpu_to_be32(bth1);
 next:
 		update_ps_mdata(&mdata, rcd);
 	}
@@ -1360,12 +1362,25 @@ int process_receive_ib(struct hfi1_packet *packet)
 
 int process_receive_bypass(struct hfi1_packet *packet)
 {
+	struct hfi1_devdata *dd = packet->rcd->dd;
+
 	if (unlikely(rhf_err_flags(packet->rhf)))
 		handle_eflags(packet);
 
-	dd_dev_err(packet->rcd->dd,
+	dd_dev_err(dd,
 		   "Bypass packets are not supported in normal operation. Dropping\n");
-	incr_cntr64(&packet->rcd->dd->sw_rcv_bypass_packet_errors);
+	incr_cntr64(&dd->sw_rcv_bypass_packet_errors);
+	if (!(dd->err_info_rcvport.status_and_code & OPA_EI_STATUS_SMASK)) {
+		u64 *flits = packet->ebuf;
+
+		if (flits && !(packet->rhf & RHF_LEN_ERR)) {
+			dd->err_info_rcvport.packet_flit1 = flits[0];
+			dd->err_info_rcvport.packet_flit2 =
+				packet->tlen > sizeof(flits[0]) ? flits[1] : 0;
+		}
+		dd->err_info_rcvport.status_and_code |=
+			(OPA_EI_STATUS_SMASK | BAD_L2_ERR);
+	}
 	return RHF_RCV_CONTINUE;
 }
 
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 677efa0..bd786b7 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -172,6 +172,9 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
 					       struct hfi1_devdata,
 					       user_cdev);
 
+	if (!atomic_inc_not_zero(&dd->user_refcount))
+		return -ENXIO;
+
 	/* Just take a ref now. Not all opens result in a context assign */
 	kobject_get(&dd->kobj);
 
@@ -183,11 +186,17 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
 		fd->rec_cpu_num = -1; /* no cpu affinity by default */
 		fd->mm = current->mm;
 		atomic_inc(&fd->mm->mm_count);
+		fp->private_data = fd;
+	} else {
+		fp->private_data = NULL;
+
+		if (atomic_dec_and_test(&dd->user_refcount))
+			complete(&dd->user_comp);
+
+		return -ENOMEM;
 	}
 
-	fp->private_data = fd;
-
-	return fd ? 0 : -ENOMEM;
+	return 0;
 }
 
 static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
@@ -798,6 +807,10 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
 done:
 	mmdrop(fdata->mm);
 	kobject_put(&dd->kobj);
+
+	if (atomic_dec_and_test(&dd->user_refcount))
+		complete(&dd->user_comp);
+
 	kfree(fdata);
 	return 0;
 }
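
The file_ops.c hunk closes a race between open() and device removal:
the count starts at one for the device itself, an open succeeds only if
the count is still non-zero, and whoever drops it to zero signals a
completion. A userspace sketch of the open-side guard, with C11 atomics
standing in for the kernel's atomic_inc_not_zero() (illustrative, not
the kernel API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int user_refcount;

/* take a reference only if the count has not already hit zero */
static int try_open(void)
{
	int old = atomic_load(&user_refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&user_refcount,
						 &old, old + 1))
			return 0;	/* reference taken, open proceeds */
	}
	return -1;	/* device going away: -ENXIO in the driver */
}

int main(void)
{
	atomic_store(&user_refcount, 1);	/* 1 == device alive */
	printf("open while alive: %d\n", try_open());

	atomic_store(&user_refcount, 0);	/* device removed */
	printf("open after removal: %d\n", try_open());
	return 0;
}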
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 7eef11b..cc87fd4 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -367,26 +367,6 @@ struct hfi1_packet {
 	u8 etype;
 };
 
-/*
- * Private data for snoop/capture support.
- */
-struct hfi1_snoop_data {
-	int mode_flag;
-	struct cdev cdev;
-	struct device *class_dev;
-	/* protect snoop data */
-	spinlock_t snoop_lock;
-	struct list_head queue;
-	wait_queue_head_t waitq;
-	void *filter_value;
-	int (*filter_callback)(void *hdr, void *data, void *value);
-	u64 dcc_cfg; /* saved value of DCC Cfg register */
-};
-
-/* snoop mode_flag values */
-#define HFI1_PORT_SNOOP_MODE     1U
-#define HFI1_PORT_CAPTURE_MODE   2U
-
 struct rvt_sge_state;
 
 /*
@@ -613,8 +593,6 @@ struct hfi1_pportdata {
 	struct mutex hls_lock;
 	u32 host_link_state;
 
-	spinlock_t            sdma_alllock ____cacheline_aligned_in_smp;
-
 	u32 lstate;	/* logical link state */
 
 	/* these are the "32 bit" regs */
@@ -1104,8 +1082,6 @@ struct hfi1_devdata {
 	char *portcntrnames;
 	size_t portcntrnameslen;
 
-	struct hfi1_snoop_data hfi1_snoop;
-
 	struct err_info_rcvport err_info_rcvport;
 	struct err_info_constraint err_info_rcv_constraint;
 	struct err_info_constraint err_info_xmit_constraint;
@@ -1141,8 +1117,8 @@ struct hfi1_devdata {
 	rhf_rcv_function_ptr normal_rhf_rcv_functions[8];
 
 	/*
-	 * Handlers for outgoing data so that snoop/capture does not
-	 * have to have its hooks in the send path
+	 * Capability to have different send engines simply by changing a
+	 * pointer value.
 	 */
 	send_routine process_pio_send;
 	send_routine process_dma_send;
@@ -1174,6 +1150,10 @@ struct hfi1_devdata {
 	spinlock_t aspm_lock;
 	/* Number of verbs contexts which have disabled ASPM */
 	atomic_t aspm_disabled_cnt;
+	/* Keeps track of user space clients */
+	atomic_t user_refcount;
+	/* Used to wait for outstanding user space clients before dev removal */
+	struct completion user_comp;
 
 	struct hfi1_affinity *affinity;
 	struct rhashtable sdma_rht;
@@ -1221,8 +1201,6 @@ struct hfi1_devdata *hfi1_lookup(int unit);
 extern u32 hfi1_cpulist_count;
 extern unsigned long *hfi1_cpulist;
 
-extern unsigned int snoop_drop_send;
-extern unsigned int snoop_force_capture;
 int hfi1_init(struct hfi1_devdata *, int);
 int hfi1_count_units(int *npresentp, int *nupp);
 int hfi1_count_active_units(void);
@@ -1557,13 +1535,6 @@ void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf);
 void reset_link_credits(struct hfi1_devdata *dd);
 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);
 
-int snoop_recv_handler(struct hfi1_packet *packet);
-int snoop_send_dma_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
-			   u64 pbc);
-int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
-			   u64 pbc);
-void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf,
-			   u64 pbc, const void *from, size_t count);
 int set_buffer_control(struct hfi1_pportdata *ppd, struct buffer_control *bc);
 
 static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd)
@@ -1763,8 +1734,7 @@ int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len);
 
 int hfi1_pcie_init(struct pci_dev *, const struct pci_device_id *);
 void hfi1_pcie_cleanup(struct pci_dev *);
-int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *,
-		     const struct pci_device_id *);
+int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *);
 void hfi1_pcie_ddcleanup(struct hfi1_devdata *);
 void hfi1_pcie_flr(struct hfi1_devdata *);
 int pcie_speeds(struct hfi1_devdata *);
@@ -1799,8 +1769,6 @@ int kdeth_process_expected(struct hfi1_packet *packet);
 int kdeth_process_eager(struct hfi1_packet *packet);
 int process_receive_invalid(struct hfi1_packet *packet);
 
-extern rhf_rcv_function_ptr snoop_rhf_rcv_functions[8];
-
 void update_sge(struct rvt_sge_state *ss, u32 length);
 
 /* global module parameter variables */
@@ -1827,9 +1795,6 @@ extern struct mutex hfi1_mutex;
 #define DRIVER_NAME		"hfi1"
 #define HFI1_USER_MINOR_BASE     0
 #define HFI1_TRACE_MINOR         127
-#define HFI1_DIAGPKT_MINOR       128
-#define HFI1_DIAG_MINOR_BASE     129
-#define HFI1_SNOOP_CAPTURE_BASE  200
 #define HFI1_NMINORS             255
 
 #define PCI_VENDOR_ID_INTEL 0x8086
@@ -1848,7 +1813,13 @@ extern struct mutex hfi1_mutex;
 static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
 						  u16 ctxt_type)
 {
-	u64 base_sc_integrity =
+	u64 base_sc_integrity;
+
+	/* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */
+	if (HFI1_CAP_IS_KSET(NO_INTEGRITY))
+		return 0;
+
+	base_sc_integrity =
 	SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
 	| SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK
 	| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
@@ -1863,7 +1834,6 @@ static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
 	| SEND_CTXT_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
 	| SEND_CTXT_CHECK_ENABLE_CHECK_OPCODE_SMASK
 	| SEND_CTXT_CHECK_ENABLE_CHECK_SLID_SMASK
-	| SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK
 	| SEND_CTXT_CHECK_ENABLE_CHECK_VL_SMASK
 	| SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK;
 
@@ -1872,18 +1842,23 @@ static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
 	else
 		base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY;
 
-	if (is_ax(dd))
-		/* turn off send-side job key checks - A0 */
-		return base_sc_integrity &
-		       ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
+	/* turn on send-side job key checks if !A0 */
+	if (!is_ax(dd))
+		base_sc_integrity |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
+
 	return base_sc_integrity;
 }
 
 static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
 {
-	u64 base_sdma_integrity =
+	u64 base_sdma_integrity;
+
+	/* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */
+	if (HFI1_CAP_IS_KSET(NO_INTEGRITY))
+		return 0;
+
+	base_sdma_integrity =
 	SEND_DMA_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
-	| SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK
 	| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
 	| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK
 	| SEND_DMA_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK
@@ -1895,14 +1870,18 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
 	| SEND_DMA_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
 	| SEND_DMA_CHECK_ENABLE_CHECK_OPCODE_SMASK
 	| SEND_DMA_CHECK_ENABLE_CHECK_SLID_SMASK
-	| SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK
 	| SEND_DMA_CHECK_ENABLE_CHECK_VL_SMASK
 	| SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK;
 
-	if (is_ax(dd))
-		/* turn off send-side job key checks - A0 */
-		return base_sdma_integrity &
-		       ~SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
+	if (!HFI1_CAP_IS_KSET(STATIC_RATE_CTRL))
+		base_sdma_integrity |=
+		SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK;
+
+	/* turn on send-side job key checks if !A0 */
+	if (!is_ax(dd))
+		base_sdma_integrity |=
+			SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
+
 	return base_sdma_integrity;
 }
 
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 60db615..e3b5bc9 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -144,6 +144,8 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
 		struct hfi1_ctxtdata *rcd;
 
 		ppd = dd->pport + (i % dd->num_pports);
+
+		/* dd->rcd[i] gets assigned inside the callee */
 		rcd = hfi1_create_ctxtdata(ppd, i, dd->node);
 		if (!rcd) {
 			dd_dev_err(dd,
@@ -169,8 +171,6 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
 		if (!rcd->sc) {
 			dd_dev_err(dd,
 				   "Unable to allocate kernel send context, failing\n");
-			dd->rcd[rcd->ctxt] = NULL;
-			hfi1_free_ctxtdata(dd, rcd);
 			goto nomem;
 		}
 
@@ -178,9 +178,6 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
 		if (ret < 0) {
 			dd_dev_err(dd,
 				   "Failed to setup kernel receive context, failing\n");
-			sc_free(rcd->sc);
-			dd->rcd[rcd->ctxt] = NULL;
-			hfi1_free_ctxtdata(dd, rcd);
 			ret = -EFAULT;
 			goto bail;
 		}
@@ -196,6 +193,10 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
 nomem:
 	ret = -ENOMEM;
 bail:
+	if (dd->rcd) {
+		for (i = 0; i < dd->num_rcv_contexts; ++i)
+			hfi1_free_ctxtdata(dd, dd->rcd[i]);
+	}
 	kfree(dd->rcd);
 	dd->rcd = NULL;
 	return ret;
@@ -216,7 +217,7 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
 	    dd->num_rcv_contexts - dd->first_user_ctxt)
 		kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
 				 (dd->num_rcv_contexts - dd->first_user_ctxt));
-	rcd = kzalloc(sizeof(*rcd), GFP_KERNEL);
+	rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
 	if (rcd) {
 		u32 rcvtids, max_entries;
 
@@ -261,13 +262,6 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
 		}
 		rcd->eager_base = base * dd->rcv_entries.group_size;
 
-		/* Validate and initialize Rcv Hdr Q variables */
-		if (rcvhdrcnt % HDRQ_INCREMENT) {
-			dd_dev_err(dd,
-				   "ctxt%u: header queue count %d must be divisible by %lu\n",
-				   rcd->ctxt, rcvhdrcnt, HDRQ_INCREMENT);
-			goto bail;
-		}
 		rcd->rcvhdrq_cnt = rcvhdrcnt;
 		rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
 		/*
@@ -506,7 +500,6 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
 	INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);
 
 	mutex_init(&ppd->hls_lock);
-	spin_lock_init(&ppd->sdma_alllock);
 	spin_lock_init(&ppd->qsfp_info.qsfp_lock);
 
 	ppd->qsfp_info.ppd = ppd;
@@ -1399,28 +1392,43 @@ static void postinit_cleanup(struct hfi1_devdata *dd)
 	hfi1_free_devdata(dd);
 }
 
+static int init_validate_rcvhdrcnt(struct device *dev, uint thecnt)
+{
+	if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
+		hfi1_early_err(dev, "Receive header queue count too small\n");
+		return -EINVAL;
+	}
+
+	if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
+		hfi1_early_err(dev,
+			       "Receive header queue count cannot be greater than %u\n",
+			       HFI1_MAX_HDRQ_EGRBUF_CNT);
+		return -EINVAL;
+	}
+
+	if (thecnt % HDRQ_INCREMENT) {
+		hfi1_early_err(dev, "Receive header queue count %d must be divisible by %lu\n",
+			       thecnt, HDRQ_INCREMENT);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	int ret = 0, j, pidx, initfail;
-	struct hfi1_devdata *dd = ERR_PTR(-EINVAL);
+	struct hfi1_devdata *dd;
 	struct hfi1_pportdata *ppd;
 
 	/* First, lock the non-writable module parameters */
 	HFI1_CAP_LOCK();
 
 	/* Validate some global module parameters */
-	if (rcvhdrcnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
-		hfi1_early_err(&pdev->dev, "Header queue  count too small\n");
-		ret = -EINVAL;
+	ret = init_validate_rcvhdrcnt(&pdev->dev, rcvhdrcnt);
+	if (ret)
 		goto bail;
-	}
-	if (rcvhdrcnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
-		hfi1_early_err(&pdev->dev,
-			       "Receive header queue count cannot be greater than %u\n",
-			       HFI1_MAX_HDRQ_EGRBUF_CNT);
-		ret = -EINVAL;
-		goto bail;
-	}
+
 	/* use the encoding function as a sanitization check */
 	if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
 		hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
@@ -1461,26 +1469,25 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (ret)
 		goto bail;
 
-	/*
-	 * Do device-specific initialization, function table setup, dd
-	 * allocation, etc.
-	 */
-	switch (ent->device) {
-	case PCI_DEVICE_ID_INTEL0:
-	case PCI_DEVICE_ID_INTEL1:
-		dd = hfi1_init_dd(pdev, ent);
-		break;
-	default:
+	if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
+	      ent->device == PCI_DEVICE_ID_INTEL1)) {
 		hfi1_early_err(&pdev->dev,
 			       "Failing on unknown Intel deviceid 0x%x\n",
 			       ent->device);
 		ret = -ENODEV;
+		goto clean_bail;
 	}
 
-	if (IS_ERR(dd))
+	/*
+	 * Do device-specific initialization, function table setup, dd
+	 * allocation, etc.
+	 */
+	dd = hfi1_init_dd(pdev, ent);
+
+	if (IS_ERR(dd)) {
 		ret = PTR_ERR(dd);
-	if (ret)
 		goto clean_bail; /* error already printed */
+	}
 
 	ret = create_workqueues(dd);
 	if (ret)
@@ -1538,12 +1545,31 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	return ret;
 }
 
+static void wait_for_clients(struct hfi1_devdata *dd)
+{
+	/*
+	 * Remove the device init value and complete the device if there
+	 * are no clients, or wait for active clients to finish.
+	 */
+	if (atomic_dec_and_test(&dd->user_refcount))
+		complete(&dd->user_comp);
+
+	wait_for_completion(&dd->user_comp);
+}
+
 static void remove_one(struct pci_dev *pdev)
 {
 	struct hfi1_devdata *dd = pci_get_drvdata(pdev);
 
 	/* close debugfs files before ib unregister */
 	hfi1_dbg_ibdev_exit(&dd->verbs_dev);
+
+	/* remove the /dev hfi1 interface */
+	hfi1_device_remove(dd);
+
+	/* wait for existing user space clients to finish */
+	wait_for_clients(dd);
+
 	/* unregister from IB core */
 	hfi1_unregister_ib_device(dd);
 
@@ -1558,8 +1584,6 @@ static void remove_one(struct pci_dev *pdev)
 	/* wait until all of our (qsfp) queue_work() calls complete */
 	flush_workqueue(ib_wq);
 
-	hfi1_device_remove(dd);
-
 	postinit_cleanup(dd);
 }
 
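
wait_for_clients() is the teardown half of the same scheme: remove_one()
first removes the /dev interface so no new opens can start, then drops
the init-time reference and blocks until the last close() completes it.
A single-threaded userspace analog of the drop-and-wait ordering
(atomics stand in for atomic_t and struct completion, so the wait loop
here exits immediately):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int refcount;	/* holds the init-time reference */
static atomic_int comp_done;	/* stand-in for struct completion */

static void put_ref(void)
{
	/* atomic_dec_and_test() analog: the last drop completes */
	if (atomic_fetch_sub(&refcount, 1) == 1)
		atomic_store(&comp_done, 1);
}

int main(void)
{
	atomic_store(&refcount, 1);	/* set at init time */

	put_ref();		/* remove_one() drops its reference */
	while (!atomic_load(&comp_done))
		;		/* wait_for_completion() stand-in */
	printf("no clients left; safe to unregister\n");
	return 0;
}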
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 89c68da..4ac8f33 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -157,8 +157,7 @@ void hfi1_pcie_cleanup(struct pci_dev *pdev)
  * fields required to re-initialize after a chip reset, or for
  * various other purposes
  */
-int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev,
-		     const struct pci_device_id *ent)
+int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
 {
 	unsigned long len;
 	resource_size_t addr;
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 50a3a36..d89b874 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -668,19 +668,12 @@ void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold)
 void set_pio_integrity(struct send_context *sc)
 {
 	struct hfi1_devdata *dd = sc->dd;
-	u64 reg = 0;
 	u32 hw_context = sc->hw_context;
 	int type = sc->type;
 
-	/*
-	 * No integrity checks if HFI1_CAP_NO_INTEGRITY is set, or if
-	 * we're snooping.
-	 */
-	if (likely(!HFI1_CAP_IS_KSET(NO_INTEGRITY)) &&
-	    dd->hfi1_snoop.mode_flag != HFI1_PORT_SNOOP_MODE)
-		reg = hfi1_pkt_default_send_ctxt_mask(dd, type);
-
-	write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), reg);
+	write_kctxt_csr(dd, hw_context,
+			SC(CHECK_ENABLE),
+			hfi1_pkt_default_send_ctxt_mask(dd, type));
 }
 
 static u32 get_buffers_allocated(struct send_context *sc)
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 8bc5013..83198a8 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -89,7 +89,7 @@ void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to)
 
 	lockdep_assert_held(&qp->s_lock);
 	qp->s_flags |= RVT_S_WAIT_RNR;
-	qp->s_timer.expires = jiffies + usecs_to_jiffies(to);
+	priv->s_rnr_timer.expires = jiffies + usecs_to_jiffies(to);
 	add_timer(&priv->s_rnr_timer);
 }
 
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index fd39bca..9cbe52d 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -2009,11 +2009,6 @@ static void sdma_hw_start_up(struct sdma_engine *sde)
 	write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg);
 }
 
-#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
-(r &= ~SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
-
-#define SET_STATIC_RATE_CONTROL_SMASK(r) \
-(r |= SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
 /*
  * set_sdma_integrity
  *
@@ -2022,19 +2017,9 @@ static void sdma_hw_start_up(struct sdma_engine *sde)
 static void set_sdma_integrity(struct sdma_engine *sde)
 {
 	struct hfi1_devdata *dd = sde->dd;
-	u64 reg;
 
-	if (unlikely(HFI1_CAP_IS_KSET(NO_INTEGRITY)))
-		return;
-
-	reg = hfi1_pkt_base_sdma_integrity(dd);
-
-	if (HFI1_CAP_IS_KSET(STATIC_RATE_CTRL))
-		CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
-	else
-		SET_STATIC_RATE_CONTROL_SMASK(reg);
-
-	write_sde_csr(sde, SD(CHECK_ENABLE), reg);
+	write_sde_csr(sde, SD(CHECK_ENABLE),
+		      hfi1_pkt_base_sdma_integrity(dd));
 }
 
 static void init_sdma_regs(
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index edba224..919a547 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -49,7 +49,6 @@
 #include "hfi.h"
 #include "mad.h"
 #include "trace.h"
-#include "affinity.h"
 
 /*
  * Start of per-port congestion control structures and support code
@@ -623,27 +622,6 @@ static ssize_t show_tempsense(struct device *device,
 	return ret;
 }
 
-static ssize_t show_sdma_affinity(struct device *device,
-				  struct device_attribute *attr, char *buf)
-{
-	struct hfi1_ibdev *dev =
-		container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
-	struct hfi1_devdata *dd = dd_from_dev(dev);
-
-	return hfi1_get_sdma_affinity(dd, buf);
-}
-
-static ssize_t store_sdma_affinity(struct device *device,
-				   struct device_attribute *attr,
-				   const char *buf, size_t count)
-{
-	struct hfi1_ibdev *dev =
-		container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
-	struct hfi1_devdata *dd = dd_from_dev(dev);
-
-	return hfi1_set_sdma_affinity(dd, buf, count);
-}
-
 /*
  * end of per-unit (or driver, in some cases, but replicated
  * per unit) functions
@@ -658,8 +636,6 @@ static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
 static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
 static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
 static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
-static DEVICE_ATTR(sdma_affinity, S_IWUSR | S_IRUGO, show_sdma_affinity,
-		   store_sdma_affinity);
 
 static struct device_attribute *hfi1_attributes[] = {
 	&dev_attr_hw_rev,
@@ -670,7 +646,6 @@ static struct device_attribute *hfi1_attributes[] = {
 	&dev_attr_boardversion,
 	&dev_attr_tempsense,
 	&dev_attr_chip_reset,
-	&dev_attr_sdma_affinity,
 };
 
 int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
diff --git a/drivers/infiniband/hw/hfi1/trace_rx.h b/drivers/infiniband/hw/hfi1/trace_rx.h
index 11e02b2..f77e59f 100644
--- a/drivers/infiniband/hw/hfi1/trace_rx.h
+++ b/drivers/infiniband/hw/hfi1/trace_rx.h
@@ -253,66 +253,6 @@ TRACE_EVENT(hfi1_mmu_invalidate,
 		      )
 	    );
 
-#define SNOOP_PRN \
-	"slid %.4x dlid %.4x qpn 0x%.6x opcode 0x%.2x,%s " \
-	"svc lvl %d pkey 0x%.4x [header = %d bytes] [data = %d bytes]"
-
-TRACE_EVENT(snoop_capture,
-	    TP_PROTO(struct hfi1_devdata *dd,
-		     int hdr_len,
-		     struct ib_header *hdr,
-		     int data_len,
-		     void *data),
-	    TP_ARGS(dd, hdr_len, hdr, data_len, data),
-	    TP_STRUCT__entry(
-			     DD_DEV_ENTRY(dd)
-			     __field(u16, slid)
-			     __field(u16, dlid)
-			     __field(u32, qpn)
-			     __field(u8, opcode)
-			     __field(u8, sl)
-			     __field(u16, pkey)
-			     __field(u32, hdr_len)
-			     __field(u32, data_len)
-			     __field(u8, lnh)
-			     __dynamic_array(u8, raw_hdr, hdr_len)
-			     __dynamic_array(u8, raw_pkt, data_len)
-			     ),
-	    TP_fast_assign(
-		struct ib_other_headers *ohdr;
-
-		__entry->lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
-		if (__entry->lnh == HFI1_LRH_BTH)
-		ohdr = &hdr->u.oth;
-		else
-		ohdr = &hdr->u.l.oth;
-		DD_DEV_ASSIGN(dd);
-		__entry->slid = be16_to_cpu(hdr->lrh[3]);
-		__entry->dlid = be16_to_cpu(hdr->lrh[1]);
-		__entry->qpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
-		__entry->opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
-		__entry->sl = (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
-		__entry->pkey =	be32_to_cpu(ohdr->bth[0]) & 0xffff;
-		__entry->hdr_len = hdr_len;
-		__entry->data_len = data_len;
-		memcpy(__get_dynamic_array(raw_hdr), hdr, hdr_len);
-		memcpy(__get_dynamic_array(raw_pkt), data, data_len);
-		),
-	    TP_printk(
-		"[%s] " SNOOP_PRN,
-		__get_str(dev),
-		__entry->slid,
-		__entry->dlid,
-		__entry->qpn,
-		__entry->opcode,
-		show_ib_opcode(__entry->opcode),
-		__entry->sl,
-		__entry->pkey,
-		__entry->hdr_len,
-		__entry->data_len
-		)
-);
-
 #endif /* __HFI1_TRACE_RX_H */
 
 #undef TRACE_INCLUDE_PATH
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index a761f80..77697d6 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -1144,7 +1144,7 @@ static int pin_vector_pages(struct user_sdma_request *req,
 	rb_node = hfi1_mmu_rb_extract(pq->handler,
 				      (unsigned long)iovec->iov.iov_base,
 				      iovec->iov.iov_len);
-	if (rb_node && !IS_ERR(rb_node))
+	if (rb_node)
 		node = container_of(rb_node, struct sdma_mmu_node, rb);
 	else
 		rb_node = NULL;
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 5fc6233..b9bf075 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -102,7 +102,10 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
 	if (vlan_tag < 0x1000)
 		vlan_tag |= (ah_attr->sl & 7) << 13;
 	ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
-	ah->av.eth.gid_index = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
+	ret = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
+	if (ret < 0)
+		return ERR_PTR(ret);
+	ah->av.eth.gid_index = ret;
 	ah->av.eth.vlan = cpu_to_be16(vlan_tag);
 	ah->av.eth.hop_limit = ah_attr->grh.hop_limit;
 	if (ah_attr->static_rate) {
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 1ea686b..6a0fec3 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -253,11 +253,14 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
 	if (context)
 		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
 			err = -EFAULT;
-			goto err_dbmap;
+			goto err_cq_free;
 		}
 
 	return &cq->ibcq;
 
+err_cq_free:
+	mlx4_cq_free(dev->dev, &cq->mcq);
+
 err_dbmap:
 	if (context)
 		mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 79d017b..fcd04b8 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -932,8 +932,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 		if (err)
 			goto err_create;
 	} else {
-		/* for now choose 64 bytes till we have a proper interface */
-		cqe_size = 64;
+		cqe_size = cache_line_size() == 128 ? 128 : 64;
 		err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
 				       &index, &inlen);
 		if (err)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 63036c7..32b09f0 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2311,14 +2311,14 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
 {
 	struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
 	struct ib_event ibev;
-
+	bool fatal = false;
 	u8 port = 0;
 
 	switch (event) {
 	case MLX5_DEV_EVENT_SYS_ERROR:
-		ibdev->ib_active = false;
 		ibev.event = IB_EVENT_DEVICE_FATAL;
 		mlx5_ib_handle_internal_error(ibdev);
+		fatal = true;
 		break;
 
 	case MLX5_DEV_EVENT_PORT_UP:
@@ -2370,6 +2370,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
 
 	if (ibdev->ib_active)
 		ib_dispatch_event(&ibev);
+
+	if (fatal)
+		ibdev->ib_active = false;
 }
 
 static void get_ext_port_caps(struct mlx5_ib_dev *dev)
@@ -3115,7 +3118,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	}
 	err = init_node_data(dev);
 	if (err)
-		goto err_dealloc;
+		goto err_free_port;
 
 	mutex_init(&dev->flow_db.lock);
 	mutex_init(&dev->cap_mask_mutex);
@@ -3125,7 +3128,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	if (ll == IB_LINK_LAYER_ETHERNET) {
 		err = mlx5_enable_roce(dev);
 		if (err)
-			goto err_dealloc;
+			goto err_free_port;
 	}
 
 	err = create_dev_resources(&dev->devr);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index dcdcd19..7d68990 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -626,6 +626,8 @@ struct mlx5_ib_dev {
 	struct mlx5_ib_resources	devr;
 	struct mlx5_mr_cache		cache;
 	struct timer_list		delay_timer;
+	/* Prevents soft lock on massive reg MRs */
+	struct mutex			slow_path_mutex;
 	int				fill_delay;
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 	struct ib_odp_caps	odp_caps;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index d4ad672..4e90124 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -610,6 +610,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 	int err;
 	int i;
 
+	mutex_init(&dev->slow_path_mutex);
 	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
 	if (!cache->wq) {
 		mlx5_ib_warn(dev, "failed to create work queue\n");
@@ -1182,9 +1183,12 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		goto error;
 	}
 
-	if (!mr)
+	if (!mr) {
+		mutex_lock(&dev->slow_path_mutex);
 		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
 				page_shift, access_flags);
+		mutex_unlock(&dev->slow_path_mutex);
+	}
 
 	if (IS_ERR(mr)) {
 		err = PTR_ERR(mr);
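
The new slow_path_mutex serializes only the uncached reg_create()
fallback; registrations satisfied from the MR cache stay fully
concurrent, which bounds lock hold times during massive registration
bursts. A pthread sketch of guarding just the slow branch (all names
hypothetical):

#include <pthread.h>
#include <stddef.h>

struct mr { int id; };

static pthread_mutex_t slow_path_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct mr slow_mr;

/* Fast path: returns NULL on a cache miss. */
static struct mr *alloc_cached_mr(void)
{
        return NULL;    /* pretend the cache is empty */
}

static struct mr *reg_create_slow(void)
{
        return &slow_mr;        /* expensive work would happen here */
}

static struct mr *register_mr(void)
{
        struct mr *mr = alloc_cached_mr();

        if (!mr) {
                /* only the expensive fallback is serialized */
                pthread_mutex_lock(&slow_path_mutex);
                mr = reg_create_slow();
                pthread_mutex_unlock(&slow_path_mutex);
        }
        return mr;
}

int main(void)
{
        return register_mr() ? 0 : 1;
}
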
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 7ce97da..d1e9218 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -2051,8 +2051,8 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
 
 		mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
 			    qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
-			    to_mcq(init_attr->recv_cq)->mcq.cqn,
-			    to_mcq(init_attr->send_cq)->mcq.cqn);
+			    init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1,
+			    init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1);
 
 		qp->trans_qp.xrcdn = xrcdn;
 
@@ -4814,6 +4814,14 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
 				 udata->inlen))
 		return ERR_PTR(-EOPNOTSUPP);
 
+	if (init_attr->log_ind_tbl_size >
+	    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
+		mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
+			    init_attr->log_ind_tbl_size,
+			    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
+		return ERR_PTR(-EINVAL);
+	}
+
 	min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
 	if (udata->outlen && udata->outlen < min_resp_len)
 		return ERR_PTR(-EINVAL);
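
The added check rejects a user-requested indirection table that exceeds
the device's log_max_rqt_size capability before any allocation happens.
A sketch of the validate-against-capability pattern (the capability
constant is hypothetical):

#include <errno.h>
#include <stdio.h>

#define LOG_MAX_RQT_SIZE 5      /* hypothetical device capability */

static int check_ind_tbl(unsigned int log_ind_tbl_size)
{
        if (log_ind_tbl_size > LOG_MAX_RQT_SIZE) {
                fprintf(stderr,
                        "log_ind_tbl_size = %u is bigger than supported = %u\n",
                        log_ind_tbl_size, (unsigned int)LOG_MAX_RQT_SIZE);
                return -EINVAL;
        }
        return 0;
}

int main(void)
{
        return check_ind_tbl(8) ? 1 : 0;
}
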
diff --git a/drivers/infiniband/sw/rdmavt/dma.c b/drivers/infiniband/sw/rdmavt/dma.c
index 01f71ca..f2cefb0 100644
--- a/drivers/infiniband/sw/rdmavt/dma.c
+++ b/drivers/infiniband/sw/rdmavt/dma.c
@@ -90,9 +90,6 @@ static u64 rvt_dma_map_page(struct ib_device *dev, struct page *page,
 	if (WARN_ON(!valid_dma_direction(direction)))
 		return BAD_DMA_ADDRESS;
 
-	if (offset + size > PAGE_SIZE)
-		return BAD_DMA_ADDRESS;
-
 	addr = (u64)page_address(page);
 	if (addr)
 		addr += offset;
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index b8258e4..ffff5a5 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -243,10 +243,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
 {
 	int err;
 	struct socket *sock;
-	struct udp_port_cfg udp_cfg;
-	struct udp_tunnel_sock_cfg tnl_cfg;
-
-	memset(&udp_cfg, 0, sizeof(udp_cfg));
+	struct udp_port_cfg udp_cfg = {0};
+	struct udp_tunnel_sock_cfg tnl_cfg = {0};
 
 	if (ipv6) {
 		udp_cfg.family = AF_INET6;
@@ -264,10 +262,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
 		return ERR_PTR(err);
 	}
 
-	tnl_cfg.sk_user_data = NULL;
 	tnl_cfg.encap_type = 1;
 	tnl_cfg.encap_rcv = rxe_udp_encap_recv;
-	tnl_cfg.encap_destroy = NULL;
 
 	/* Setup UDP tunnel */
 	setup_udp_tunnel_sock(net, sock, &tnl_cfg);
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index b8036cf..c3e60e4 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -522,6 +522,7 @@ static void rxe_qp_reset(struct rxe_qp *qp)
 	if (qp->sq.queue) {
 		__rxe_do_task(&qp->comp.task);
 		__rxe_do_task(&qp->req.task);
+		rxe_queue_reset(qp->sq.queue);
 	}
 
 	/* cleanup attributes */
@@ -573,6 +574,7 @@ void rxe_qp_error(struct rxe_qp *qp)
 {
 	qp->req.state = QP_STATE_ERROR;
 	qp->resp.state = QP_STATE_ERROR;
+	qp->attr.qp_state = IB_QPS_ERR;
 
 	/* drain work and packet queues */
 	rxe_run_task(&qp->resp.task, 1);
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
index 0827425..d14bf49 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.c
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -84,6 +84,15 @@ int do_mmap_info(struct rxe_dev *rxe,
 	return -EINVAL;
 }
 
+inline void rxe_queue_reset(struct rxe_queue *q)
+{
+	/* The queue consists of a header followed by the memory of the
+	 * actual queue; see "struct rxe_queue_buf" in rxe_queue.h.
+	 * Reset only the queue contents, not the management header.
+	 */
+	memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));
+}
+
 struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
 				 int *num_elem,
 				 unsigned int elem_size)
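
rxe_queue_reset() zeroes only the element area that follows the
management header, so producer/consumer bookkeeping survives a QP
reset. A sketch of the layout and the offset arithmetic (the structs
are simplified stand-ins for struct rxe_queue_buf and struct
rxe_queue):

#include <stdlib.h>
#include <string.h>

/* Simplified stand-in: management header followed by queue memory. */
struct queue_buf {
        unsigned int producer;  /* must survive a reset */
        unsigned int consumer;
        char data[];            /* the actual queue entries */
};

struct queue {
        struct queue_buf *buf;
        size_t buf_size;        /* header + data, in bytes */
};

static void queue_reset(struct queue *q)
{
        /* zero the entries only; leave the header indices intact */
        memset(q->buf->data, 0, q->buf_size - sizeof(struct queue_buf));
}

int main(void)
{
        struct queue q;

        q.buf_size = sizeof(struct queue_buf) + 64;
        q.buf = calloc(1, q.buf_size);
        if (!q.buf)
                return 1;
        queue_reset(&q);
        free(q.buf);
        return 0;
}
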
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
index 239fd60..8c8641c 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.h
+++ b/drivers/infiniband/sw/rxe/rxe_queue.h
@@ -84,6 +84,8 @@ int do_mmap_info(struct rxe_dev *rxe,
 		 size_t buf_size,
 		 struct rxe_mmap_info **ip_p);
 
+void rxe_queue_reset(struct rxe_queue *q);
+
 struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
 				 int *num_elem,
 				 unsigned int elem_size);
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 832846b..22bd963 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -696,7 +696,8 @@ int rxe_requester(void *arg)
 						       qp->req.wqe_index);
 			wqe->state = wqe_state_done;
 			wqe->status = IB_WC_SUCCESS;
-			goto complete;
+			__rxe_do_task(&qp->comp.task);
+			return 0;
 		}
 		payload = mtu;
 	}
@@ -745,13 +746,17 @@ int rxe_requester(void *arg)
 	wqe->status = IB_WC_LOC_PROT_ERR;
 	wqe->state = wqe_state_error;
 
-complete:
-	if (qp_type(qp) != IB_QPT_RC) {
-		while (rxe_completer(qp) == 0)
-			;
-	}
-
-	return 0;
+	/*
+	 * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS
+	 * ---------8<---------8<-------------
+	 * ...Note that if a completion error occurs, a Work Completion
+	 * will always be generated, even if the signaling
+	 * indicator requests an Unsignaled Completion.
+	 * ---------8<---------8<-------------
+	 */
+	wqe->wr.send_flags |= IB_SEND_SIGNALED;
+	__rxe_do_task(&qp->comp.task);
+	return -EAGAIN;
 
 exit:
 	return -EAGAIN;
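
Per the IBA rule quoted in the hunk, an error completion must be
reported even for work requests posted unsignaled, which is why the
error path forces the signaled flag before running the completer. A
sketch of that flag handling (the constant and struct are hypothetical
stand-ins):

#include <stdio.h>

#define SEND_SIGNALED 0x1u      /* hypothetical stand-in for IB_SEND_SIGNALED */

struct wqe {
        unsigned int send_flags;
        int status;             /* 0 = success, nonzero = error */
};

static void complete_on_error(struct wqe *wqe, int status)
{
        wqe->status = status;
        /* error completions are always generated (IBA 10.7.3.1),
         * even if the WR was posted unsignaled */
        wqe->send_flags |= SEND_SIGNALED;
}

int main(void)
{
        struct wqe w = { .send_flags = 0, .status = 0 };

        complete_on_error(&w, -1);
        printf("signaled=%u status=%d\n",
               w.send_flags & SEND_SIGNALED, w.status);
        return 0;
}
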
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 08c87fa..1f32688 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -65,6 +65,7 @@
 #include <linux/mailbox_controller.h>
 #include <linux/mailbox_client.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
+#include <acpi/pcc.h>
 
 #include "mailbox.h"
 
@@ -267,6 +268,8 @@ struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
 	if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
 		chan->txdone_method |= TXDONE_BY_ACK;
 
+	spin_unlock_irqrestore(&chan->lock, flags);
+
 	if (pcc_doorbell_irq[subspace_id] > 0) {
 		int rc;
 
@@ -275,12 +278,11 @@ struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
 		if (unlikely(rc)) {
 			dev_err(dev, "failed to register PCC interrupt %d\n",
 				pcc_doorbell_irq[subspace_id]);
+			pcc_mbox_free_channel(chan);
 			chan = ERR_PTR(rc);
 		}
 	}
 
-	spin_unlock_irqrestore(&chan->lock, flags);
-
 	return chan;
 }
 EXPORT_SYMBOL_GPL(pcc_mbox_request_channel);
@@ -304,20 +306,19 @@ void pcc_mbox_free_channel(struct mbox_chan *chan)
 		return;
 	}
 
+	if (pcc_doorbell_irq[id] > 0)
+		devm_free_irq(chan->mbox->dev, pcc_doorbell_irq[id], chan);
+
 	spin_lock_irqsave(&chan->lock, flags);
 	chan->cl = NULL;
 	chan->active_req = NULL;
 	if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK))
 		chan->txdone_method = TXDONE_BY_POLL;
 
-	if (pcc_doorbell_irq[id] > 0)
-		devm_free_irq(chan->mbox->dev, pcc_doorbell_irq[id], chan);
-
 	spin_unlock_irqrestore(&chan->lock, flags);
 }
 EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
 
-
 /**
  * pcc_send_data - Called from Mailbox Controller code. Used
  *		here only to ring the channel doorbell. The PCC client
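
Both pcc.c hunks move the IRQ request and free outside the channel
spinlock: devm_request_irq() and devm_free_irq() can sleep, which is
not allowed while holding a spinlock with interrupts disabled. A
pthread sketch of keeping the sleeping call outside the critical
section (names hypothetical; a userspace mutex only approximates the
spinlock constraint):

#include <pthread.h>

static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;
static int chan_state;

static void request_irq_may_sleep(void)
{
        /* pretend this blocks, e.g. waiting for an allocation */
}

static void setup_channel(void)
{
        pthread_mutex_lock(&chan_lock);
        chan_state = 1;         /* touch only lock-protected state here */
        pthread_mutex_unlock(&chan_lock);

        /* sleeping calls happen only after the lock is dropped */
        request_irq_may_sleep();
}

int main(void)
{
        setup_channel();
        return 0;
}
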
diff --git a/drivers/media/dvb-frontends/gp8psk-fe.c b/drivers/media/dvb-frontends/gp8psk-fe.c
index be19afe..93f59bf 100644
--- a/drivers/media/dvb-frontends/gp8psk-fe.c
+++ b/drivers/media/dvb-frontends/gp8psk-fe.c
@@ -1,5 +1,5 @@
-/* DVB USB compliant Linux driver for the
- *  - GENPIX 8pks/qpsk/DCII USB2.0 DVB-S module
+/*
+ * Frontend driver for the GENPIX 8psk/qpsk/DCII USB2.0 DVB-S module
  *
  * Copyright (C) 2006,2007 Alan Nisota (alannisota@gmail.com)
  * Copyright (C) 2006,2007 Genpix Electronics (genpix@genpix-electronics.com)
@@ -8,11 +8,9 @@
  *
  * This module is based off the vp7045 and vp702x modules
  *
- *	This program is free software; you can redistribute it and/or modify it
- *	under the terms of the GNU General Public License as published by the Free
- *	Software Foundation, version 2.
- *
- * see Documentation/dvb/README.dvb-usb for more information
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation, version 2.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -395,3 +393,8 @@ static struct dvb_frontend_ops gp8psk_fe_ops = {
 	.dishnetwork_send_legacy_command = gp8psk_fe_send_legacy_dish_cmd,
 	.enable_high_lnb_voltage = gp8psk_fe_enable_high_lnb_voltage
 };
+
+MODULE_AUTHOR("Alan Nisota <alannisota@gamil.com>");
+MODULE_DESCRIPTION("Frontend Driver for Genpix DVB-S");
+MODULE_VERSION("1.1");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index 3228fd1..9ff2439 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -123,19 +123,6 @@ static const struct intel_lpss_platform_info apl_i2c_info = {
 	.properties = apl_i2c_properties,
 };
 
-static const struct intel_lpss_platform_info kbl_info = {
-	.clk_rate = 120000000,
-};
-
-static const struct intel_lpss_platform_info kbl_uart_info = {
-	.clk_rate = 120000000,
-	.clk_con_id = "baudclk",
-};
-
-static const struct intel_lpss_platform_info kbl_i2c_info = {
-	.clk_rate = 133000000,
-};
-
 static const struct pci_device_id intel_lpss_pci_ids[] = {
 	/* BXT A-Step */
 	{ PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info },
@@ -207,15 +194,15 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
 	{ PCI_VDEVICE(INTEL, 0xa161), (kernel_ulong_t)&spt_i2c_info },
 	{ PCI_VDEVICE(INTEL, 0xa166), (kernel_ulong_t)&spt_uart_info },
 	/* KBL-H */
-	{ PCI_VDEVICE(INTEL, 0xa2a7), (kernel_ulong_t)&kbl_uart_info },
-	{ PCI_VDEVICE(INTEL, 0xa2a8), (kernel_ulong_t)&kbl_uart_info },
-	{ PCI_VDEVICE(INTEL, 0xa2a9), (kernel_ulong_t)&kbl_info },
-	{ PCI_VDEVICE(INTEL, 0xa2aa), (kernel_ulong_t)&kbl_info },
-	{ PCI_VDEVICE(INTEL, 0xa2e0), (kernel_ulong_t)&kbl_i2c_info },
-	{ PCI_VDEVICE(INTEL, 0xa2e1), (kernel_ulong_t)&kbl_i2c_info },
-	{ PCI_VDEVICE(INTEL, 0xa2e2), (kernel_ulong_t)&kbl_i2c_info },
-	{ PCI_VDEVICE(INTEL, 0xa2e3), (kernel_ulong_t)&kbl_i2c_info },
-	{ PCI_VDEVICE(INTEL, 0xa2e6), (kernel_ulong_t)&kbl_uart_info },
+	{ PCI_VDEVICE(INTEL, 0xa2a7), (kernel_ulong_t)&spt_uart_info },
+	{ PCI_VDEVICE(INTEL, 0xa2a8), (kernel_ulong_t)&spt_uart_info },
+	{ PCI_VDEVICE(INTEL, 0xa2a9), (kernel_ulong_t)&spt_info },
+	{ PCI_VDEVICE(INTEL, 0xa2aa), (kernel_ulong_t)&spt_info },
+	{ PCI_VDEVICE(INTEL, 0xa2e0), (kernel_ulong_t)&spt_i2c_info },
+	{ PCI_VDEVICE(INTEL, 0xa2e1), (kernel_ulong_t)&spt_i2c_info },
+	{ PCI_VDEVICE(INTEL, 0xa2e2), (kernel_ulong_t)&spt_i2c_info },
+	{ PCI_VDEVICE(INTEL, 0xa2e3), (kernel_ulong_t)&spt_i2c_info },
+	{ PCI_VDEVICE(INTEL, 0xa2e6), (kernel_ulong_t)&spt_uart_info },
 	{ }
 };
 MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids);
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 41b1138..70c646b 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -502,9 +502,6 @@ int intel_lpss_suspend(struct device *dev)
 	for (i = 0; i < LPSS_PRIV_REG_COUNT; i++)
 		lpss->priv_ctx[i] = readl(lpss->priv + i * 4);
 
-	/* Put the device into reset state */
-	writel(0, lpss->priv + LPSS_PRIV_RESETS);
-
 	return 0;
 }
 EXPORT_SYMBOL_GPL(intel_lpss_suspend);
diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c
index 43e54b7..f9a8c52 100644
--- a/drivers/mfd/intel_soc_pmic_bxtwc.c
+++ b/drivers/mfd/intel_soc_pmic_bxtwc.c
@@ -86,6 +86,7 @@ enum bxtwc_irqs_level2 {
 	BXTWC_THRM2_IRQ,
 	BXTWC_BCU_IRQ,
 	BXTWC_ADC_IRQ,
+	BXTWC_USBC_IRQ,
 	BXTWC_CHGR0_IRQ,
 	BXTWC_CHGR1_IRQ,
 	BXTWC_GPIO0_IRQ,
@@ -111,7 +112,8 @@ static const struct regmap_irq bxtwc_regmap_irqs_level2[] = {
 	REGMAP_IRQ_REG(BXTWC_THRM2_IRQ, 2, 0xff),
 	REGMAP_IRQ_REG(BXTWC_BCU_IRQ, 3, 0x1f),
 	REGMAP_IRQ_REG(BXTWC_ADC_IRQ, 4, 0xff),
-	REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 5, 0x3f),
+	REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 5, BIT(5)),
+	REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 5, 0x1f),
 	REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 6, 0x1f),
 	REGMAP_IRQ_REG(BXTWC_GPIO0_IRQ, 7, 0xff),
 	REGMAP_IRQ_REG(BXTWC_GPIO1_IRQ, 8, 0x3f),
@@ -146,7 +148,7 @@ static struct resource adc_resources[] = {
 };
 
 static struct resource usbc_resources[] = {
-	DEFINE_RES_IRQ_NAMED(BXTWC_CHGR0_IRQ, "USBC"),
+	DEFINE_RES_IRQ(BXTWC_USBC_IRQ),
 };
 
 static struct resource charger_resources[] = {
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 3ac486a..c57e407 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -399,6 +399,8 @@ int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones)
 					clones[i]);
 	}
 
+	put_device(dev);
+
 	return 0;
 }
 EXPORT_SYMBOL(mfd_clone_cell);
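
The added put_device() balances the reference that the earlier lookup
took on the parent device; every get implied by a find/lookup helper
needs a matching put once the caller is done. A minimal refcount sketch
(struct and helpers are hypothetical):

#include <stdio.h>

struct device { int refs; };

static struct device dev0 = { .refs = 1 };

/* Lookup helpers return a referenced device. */
static struct device *find_device(void)
{
        dev0.refs++;            /* the get taken inside the lookup */
        return &dev0;
}

static void put_device(struct device *d)
{
        d->refs--;
}

int main(void)
{
        struct device *d = find_device();

        /* ... use d ... */
        put_device(d);          /* balance the lookup's reference */
        printf("refs=%d\n", d->refs);
        return 0;
}
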
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index cfdae8a..b0c7bcd 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -851,6 +851,8 @@ static int stmpe_reset(struct stmpe *stmpe)
 	if (ret < 0)
 		return ret;
 
+	msleep(10);
+
 	timeout = jiffies + msecs_to_jiffies(100);
 	while (time_before(jiffies, timeout)) {
 		ret = __stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_SYS_CTRL]);
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 3eb7430..f8ff25c 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -142,6 +142,9 @@ struct plx_pci_card {
 #define CTI_PCI_VENDOR_ID		0x12c4
 #define CTI_PCI_DEVICE_ID_CRG001	0x0900
 
+#define MOXA_PCI_VENDOR_ID		0x1393
+#define MOXA_PCI_DEVICE_ID		0x0100
+
 static void plx_pci_reset_common(struct pci_dev *pdev);
 static void plx9056_pci_reset_common(struct pci_dev *pdev);
 static void plx_pci_reset_marathon_pci(struct pci_dev *pdev);
@@ -258,6 +261,14 @@ static struct plx_pci_card_info plx_pci_card_info_elcus = {
 	/* based on PLX9030 */
 };
 
+static struct plx_pci_card_info plx_pci_card_info_moxa = {
+	"MOXA", 2,
+	PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+	{0, 0x00, 0x00}, { {0, 0x00, 0x80}, {1, 0x00, 0x80} },
+	&plx_pci_reset_common
+	 /* based on PLX9052 */
+};
+
 static const struct pci_device_id plx_pci_tbl[] = {
 	{
 		/* Adlink PCI-7841/cPCI-7841 */
@@ -357,6 +368,13 @@ static const struct pci_device_id plx_pci_tbl[] = {
 		0, 0,
 		(kernel_ulong_t)&plx_pci_card_info_elcus
 	},
+	{
+		/* moxa */
+		MOXA_PCI_VENDOR_ID, MOXA_PCI_DEVICE_ID,
+		PCI_ANY_ID, PCI_ANY_ID,
+		0, 0,
+		(kernel_ulong_t)&plx_pci_card_info_moxa
+	},
 	{ 0,}
 };
 MODULE_DEVICE_TABLE(pci, plx_pci_tbl);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index c481f10..5390ae8 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -204,17 +204,6 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
 	return num_msgs;
 }
 
-static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring)
-{
-	u32 data = 0x7777;
-
-	xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e);
-	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data);
-	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16);
-	xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40);
-	xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80);
-}
-
 void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
 			    struct xgene_enet_pdata *pdata,
 			    enum xgene_enet_err_code status)
@@ -929,5 +918,4 @@ struct xgene_ring_ops xgene_ring1_ops = {
 	.clear = xgene_enet_clear_ring,
 	.wr_cmd = xgene_enet_wr_cmd,
 	.len = xgene_enet_ring_len,
-	.coalesce = xgene_enet_setup_coalescing,
 };
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 8456337..06e598c 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -55,8 +55,10 @@ enum xgene_enet_rm {
 #define PREFETCH_BUF_EN		BIT(21)
 #define CSR_RING_ID_BUF		0x000c
 #define CSR_PBM_COAL		0x0014
+#define CSR_PBM_CTICK0		0x0018
 #define CSR_PBM_CTICK1		0x001c
 #define CSR_PBM_CTICK2		0x0020
+#define CSR_PBM_CTICK3		0x0024
 #define CSR_THRESHOLD0_SET1	0x0030
 #define CSR_THRESHOLD1_SET1	0x0034
 #define CSR_RING_NE_INT_MODE	0x017c
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 429f18f..8158d46 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1188,7 +1188,8 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
 		tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
 	}
 
-	pdata->ring_ops->coalesce(pdata->tx_ring[0]);
+	if (pdata->ring_ops->coalesce)
+		pdata->ring_ops->coalesce(pdata->tx_ring[0]);
 	pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;
 
 	return 0;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
index 2b76732..af51dd5 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
@@ -30,7 +30,7 @@ static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
 		ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK);
 		ring_cfg[3] |= SET_BIT(X2_DEQINTEN);
 	}
-	ring_cfg[0] |= SET_VAL(X2_CFGCRID, 1);
+	ring_cfg[0] |= SET_VAL(X2_CFGCRID, 2);
 
 	addr >>= 8;
 	ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr);
@@ -192,13 +192,15 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
 
 static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring)
 {
-	u32 data = 0x7777;
+	u32 data = 0x77777777;
 
 	xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e);
+	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK0, data);
 	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data);
-	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16);
-	xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40);
-	xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80);
+	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data);
+	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK3, data);
+	xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x08);
+	xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x10);
 }
 
 struct xgene_ring_ops xgene_ring2_ops = {
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 31ca204..49f4cafe 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -307,6 +307,10 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac,
 	u32 ctl;
 
 	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
+
+	/* Preserve only the ADDREXT bits (16-17) of the current hardware value. */
+	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
+
 	if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) {
 		ctl &= ~BGMAC_DMA_RX_BL_MASK;
 		ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;
@@ -317,7 +321,6 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac,
 		ctl &= ~BGMAC_DMA_RX_PT_MASK;
 		ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
 	}
-	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
 	ctl |= BGMAC_DMA_RX_ENABLE;
 	ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
 	ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
@@ -1046,9 +1049,9 @@ static void bgmac_enable(struct bgmac *bgmac)
 
 	mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
 		BGMAC_DS_MM_SHIFT;
-	if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) || mode != 0)
+	if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0)
 		bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
-	if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST && mode == 2)
+	if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) && mode == 2)
 		bgmac_cco_ctl_maskset(bgmac, 1, ~0,
 				      BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index b3791b3..1f7034d 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -49,6 +49,7 @@
 #include <linux/firmware.h>
 #include <linux/log2.h>
 #include <linux/aer.h>
+#include <linux/crash_dump.h>
 
 #if IS_ENABLED(CONFIG_CNIC)
 #define BCM_CNIC 1
@@ -4764,15 +4765,16 @@ bnx2_setup_msix_tbl(struct bnx2 *bp)
 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
 }
 
-static int
-bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
+static void
+bnx2_wait_dma_complete(struct bnx2 *bp)
 {
 	u32 val;
-	int i, rc = 0;
-	u8 old_port;
+	int i;
 
-	/* Wait for the current PCI transaction to complete before
-	 * issuing a reset. */
+	/*
+	 * Wait for the current PCI transaction to complete before
+	 * issuing a reset.
+	 */
 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
 	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
 		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
@@ -4796,6 +4798,21 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
 		}
 	}
 
+	return;
+}
+
+
+static int
+bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
+{
+	u32 val;
+	int i, rc = 0;
+	u8 old_port;
+
+	/* Wait for the current PCI transaction to complete before
+	 * issuing a reset. */
+	bnx2_wait_dma_complete(bp);
+
 	/* Wait for the firmware to tell us it is ok to issue a reset. */
 	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
 
@@ -6361,6 +6378,10 @@ bnx2_open(struct net_device *dev)
 	struct bnx2 *bp = netdev_priv(dev);
 	int rc;
 
+	rc = bnx2_request_firmware(bp);
+	if (rc < 0)
+		goto out;
+
 	netif_carrier_off(dev);
 
 	bnx2_disable_int(bp);
@@ -6429,6 +6450,7 @@ bnx2_open(struct net_device *dev)
 	bnx2_free_irq(bp);
 	bnx2_free_mem(bp);
 	bnx2_del_napi(bp);
+	bnx2_release_firmware(bp);
 	goto out;
 }
 
@@ -8575,12 +8597,15 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	pci_set_drvdata(pdev, dev);
 
-	rc = bnx2_request_firmware(bp);
-	if (rc < 0)
-		goto error;
+	/*
+	 * In-flight DMA from the first kernel can still be running in the
+	 * kdump kernel. A new IO page table has already been created before
+	 * bnx2 performs its reset at open time, so wait for the in-flight
+	 * DMA to complete here to keep it from looking up entries in the
+	 * newly created IO page table.
+	 */
+	if (is_kdump_kernel())
+		bnx2_wait_dma_complete(bp);
 
-
-	bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
 	memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
 
 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
@@ -8613,7 +8638,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	return 0;
 
 error:
-	bnx2_release_firmware(bp);
 	pci_iounmap(pdev, bp->regview);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
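
Factoring the DMA-quiesce loop out of bnx2_reset_chip() lets probe run
it on its own in a kdump kernel, where DMA started by the crashed
kernel may still be in flight against the old page tables. A sketch of
the factored-helper shape (all names hypothetical):

#include <stdbool.h>

static bool is_kdump(void)
{
        return false;           /* stand-in for is_kdump_kernel() */
}

static void wait_dma_complete(void)
{
        /* poll the hardware until outstanding DMA drains */
}

static void reset_chip(void)
{
        wait_dma_complete();    /* always quiesce before a reset */
        /* ... issue the actual reset ... */
}

static void probe(void)
{
        /* quiesce leftover DMA without paying for a full reset */
        if (is_kdump())
                wait_dma_complete();
}

int main(void)
{
        probe();
        reset_chip();
        return 0;
}
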
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index a9f9f37..c690966 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -6309,6 +6309,7 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
 			 struct tc_to_netdev *ntc)
 {
 	struct bnxt *bp = netdev_priv(dev);
+	bool sh = false;
 	u8 tc;
 
 	if (ntc->type != TC_SETUP_MQPRIO)
@@ -6325,12 +6326,11 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
 	if (netdev_get_num_tc(dev) == tc)
 		return 0;
 
+	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+		sh = true;
+
 	if (tc) {
 		int max_rx_rings, max_tx_rings, rc;
-		bool sh = false;
-
-		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
-			sh = true;
 
 		rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
 		if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings)
@@ -6348,7 +6348,8 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
 		bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
 		netdev_reset_tc(dev);
 	}
-	bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
+	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
+			       bp->tx_nr_rings + bp->rx_nr_rings;
 	bp->num_stat_ctxs = bp->cp_nr_rings;
 
 	if (netif_running(bp->dev))
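
With shared rings, one completion ring serves a TX/RX pair, so the
completion-ring count is the larger of the two; with separate rings,
each TX and RX ring needs its own, so the counts add. A sketch of the
computation (a hypothetical wrapper around the expression in the hunk):

#include <stdio.h>

static int cp_rings(int tx, int rx, int shared)
{
        return shared ? (tx > rx ? tx : rx) : tx + rx;
}

int main(void)
{
        printf("shared: %d, separate: %d\n",
               cp_rings(4, 8, 1), cp_rings(4, 8, 0));
        return 0;
}
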
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index ec6cd18..60e2af8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -774,8 +774,8 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
 
 		if (vf->flags & BNXT_VF_LINK_UP) {
 			/* if physical link is down, force link up on VF */
-			if (phy_qcfg_resp.link ==
-			    PORT_PHY_QCFG_RESP_LINK_NO_LINK) {
+			if (phy_qcfg_resp.link !=
+			    PORT_PHY_QCFG_RESP_LINK_LINK) {
 				phy_qcfg_resp.link =
 					PORT_PHY_QCFG_RESP_LINK_LINK;
 				phy_qcfg_resp.link_speed = cpu_to_le16(
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index f9df4b5a..f42f672 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -177,6 +177,7 @@ bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
 		return 0;
 
 	hw_cons = *(tcb->hw_consumer_index);
+	rmb();
 	cons = tcb->consumer_index;
 	q_depth = tcb->q_depth;
 
@@ -3094,7 +3095,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	BNA_QE_INDX_INC(prod, q_depth);
 	tcb->producer_index = prod;
 
-	smp_mb();
+	wmb();
 
 	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
 		return NETDEV_TX_OK;
@@ -3102,7 +3103,6 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	skb_tx_timestamp(skb);
 
 	bna_txq_prod_indx_doorbell(tcb);
-	smp_mb();
 
 	return NETDEV_TX_OK;
 }
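
The bnad hunks replace full smp_mb() barriers with a directed pair: a
write barrier after publishing the producer index and a read barrier
after reading the hardware consumer index. A C11 sketch of the same
release/acquire pairing (simplified; ordering against device DMA needs
the kernel's wmb()/rmb(), not C11 atomics):

#include <stdatomic.h>
#include <stdio.h>

static unsigned int ring[64];
static _Atomic unsigned int producer_index;

static void publish(unsigned int slot, unsigned int val)
{
        ring[slot] = val;
        /* release: the payload write is ordered before the index update */
        atomic_store_explicit(&producer_index, slot + 1,
                              memory_order_release);
}

static int consume(unsigned int slot, unsigned int *val)
{
        /* acquire: the index read is ordered before the payload read */
        if (atomic_load_explicit(&producer_index,
                                 memory_order_acquire) <= slot)
                return 0;
        *val = ring[slot];
        return 1;
}

int main(void)
{
        unsigned int v;

        publish(0, 42);
        if (consume(0, &v))
                printf("%u\n", v);
        return 0;
}
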
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 50812a1..df1573c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -178,9 +178,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
 	CH_PCI_ID_TABLE_FENTRY(0x6005),
 	CH_PCI_ID_TABLE_FENTRY(0x6006),
 	CH_PCI_ID_TABLE_FENTRY(0x6007),
+	CH_PCI_ID_TABLE_FENTRY(0x6008),
 	CH_PCI_ID_TABLE_FENTRY(0x6009),
 	CH_PCI_ID_TABLE_FENTRY(0x600d),
-	CH_PCI_ID_TABLE_FENTRY(0x6010),
 	CH_PCI_ID_TABLE_FENTRY(0x6011),
 	CH_PCI_ID_TABLE_FENTRY(0x6014),
 	CH_PCI_ID_TABLE_FENTRY(0x6015),
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index c54c6fa..b6ed818 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -332,8 +332,10 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev,
 		return ERR_PTR(-ENODEV);
 
 	handle = dev->ops->get_handle(dev, port_id);
-	if (IS_ERR(handle))
+	if (IS_ERR(handle)) {
+		put_device(&dev->cls_dev);
 		return handle;
+	}
 
 	handle->dev = dev;
 	handle->owner_dev = owner_dev;
@@ -356,6 +358,8 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev,
 	for (j = i - 1; j >= 0; j--)
 		hnae_fini_queue(handle->qs[j]);
 
+	put_device(&dev->cls_dev);
+
 	return ERR_PTR(-ENOMEM);
 }
 EXPORT_SYMBOL(hnae_get_handle);
@@ -377,6 +381,8 @@ void hnae_put_handle(struct hnae_handle *h)
 		dev->ops->put_handle(h);
 
 	module_put(dev->owner);
+
+	put_device(&dev->cls_dev);
 }
 EXPORT_SYMBOL(hnae_put_handle);
 
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 54efa9a..bd719e2 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -2446,6 +2446,8 @@ static int ehea_open(struct net_device *dev)
 
 	netif_info(port, ifup, dev, "enabling port\n");
 
+	netif_carrier_off(dev);
+
 	ret = ehea_up(dev);
 	if (!ret) {
 		port_napi_enable(port);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5f44c55..4f3281a 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1505,9 +1505,8 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
 		    adapter->max_rx_add_entries_per_subcrq > entries_page ?
 		    entries_page : adapter->max_rx_add_entries_per_subcrq;
 
-		/* Choosing the maximum number of queues supported by firmware*/
-		adapter->req_tx_queues = adapter->max_tx_queues;
-		adapter->req_rx_queues = adapter->max_rx_queues;
+		adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
+		adapter->req_rx_queues = adapter->opt_rx_comp_queues;
 		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
 
 		adapter->req_mtu = adapter->max_mtu;
@@ -3706,7 +3705,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	struct net_device *netdev;
 	unsigned char *mac_addr_p;
 	struct dentry *ent;
-	char buf[16]; /* debugfs name buf */
+	char buf[17]; /* debugfs name buf */
 	int rc;
 
 	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
@@ -3845,6 +3844,9 @@ static int ibmvnic_remove(struct vio_dev *dev)
 	if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
 		debugfs_remove_recursive(adapter->debugfs_dir);
 
+	dma_unmap_single(&dev->dev, adapter->stats_token,
+			 sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE);
+
 	if (adapter->ras_comps)
 		dma_free_coherent(&dev->dev,
 				  adapter->ras_comp_num *
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index bf5cc55b..5b12022 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1381,6 +1381,7 @@ static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
 		temp = (val & 0x003fff00) >> 8;
 
 	temp *= 64000000;
+	temp += mp->t_clk / 2;
 	do_div(temp, mp->t_clk);
 
 	return (unsigned int)temp;
@@ -1417,6 +1418,7 @@ static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
 
 	temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
 	temp *= 64000000;
+	temp += mp->t_clk / 2;
 	do_div(temp, mp->t_clk);
 
 	return (unsigned int)temp;
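
Adding t_clk / 2 before do_div() turns a truncating division into a
round-to-nearest one, so coalescing values no longer read back one tick
low. A worked example of the idiom (the clock value is hypothetical):

#include <stdio.h>

/* Round-to-nearest integer division: add half the divisor first. */
static unsigned long long div_round(unsigned long long num, unsigned int div)
{
        return (num + div / 2) / div;
}

int main(void)
{
        unsigned int t_clk = 166666667;         /* hypothetical 166 MHz */
        unsigned long long temp = 2ULL * 64000000;

        /* 128000000 / 166666667 = 0.768: truncation loses the tick */
        printf("truncated=%llu rounded=%llu\n",
               temp / t_clk, div_round(temp, t_clk));
        return 0;
}
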
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 12c99a2..3a47e83 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2202,7 +2202,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 
 	if (!shutdown)
 		free_netdev(dev);
-	dev->ethtool_ops = NULL;
 }
 
 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index f4c687c..84e8b25 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1445,6 +1445,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	c->netdev   = priv->netdev;
 	c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
 	c->num_tc   = priv->params.num_tc;
+	c->xdp      = !!priv->xdp_prog;
 
 	if (priv->params.rx_am_enabled)
 		rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode);
@@ -1468,6 +1469,12 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	if (err)
 		goto err_close_tx_cqs;
 
+	/* XDP SQ CQ params are the same as the normal TXQ SQ CQ params */
+	err = c->xdp ? mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq,
+				     priv->params.tx_cq_moderation) : 0;
+	if (err)
+		goto err_close_rx_cq;
+
 	napi_enable(&c->napi);
 
 	err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq);
@@ -1488,21 +1495,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 		}
 	}
 
-	if (priv->xdp_prog) {
-		/* XDP SQ CQ params are same as normal TXQ sq CQ params */
-		err = mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq,
-				    priv->params.tx_cq_moderation);
-		if (err)
-			goto err_close_sqs;
+	err = c->xdp ? mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq) : 0;
+	if (err)
+		goto err_close_sqs;
 
-		err = mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq);
-		if (err) {
-			mlx5e_close_cq(&c->xdp_sq.cq);
-			goto err_close_sqs;
-		}
-	}
-
-	c->xdp = !!priv->xdp_prog;
 	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
 	if (err)
 		goto err_close_xdp_sq;
@@ -1512,7 +1508,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 
 	return 0;
 err_close_xdp_sq:
-	mlx5e_close_sq(&c->xdp_sq);
+	if (c->xdp)
+		mlx5e_close_sq(&c->xdp_sq);
 
 err_close_sqs:
 	mlx5e_close_sqs(c);
@@ -1522,6 +1519,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 
 err_disable_napi:
 	napi_disable(&c->napi);
+	if (c->xdp)
+		mlx5e_close_cq(&c->xdp_sq.cq);
+
+err_close_rx_cq:
 	mlx5e_close_cq(&c->rq.cq);
 
 err_close_tx_cqs:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 7fe6559..bf1c09c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -308,7 +308,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
 	netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;
 #endif
 
-	netdev->features	 |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC;
+	netdev->features	 |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
 	netdev->hw_features      |= NETIF_F_HW_TC;
 
 	eth_hw_addr_random(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index ce8c54d..6bb21b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -237,12 +237,15 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
 			skb_flow_dissector_target(f->dissector,
 						  FLOW_DISSECTOR_KEY_VLAN,
 						  f->mask);
-		if (mask->vlan_id) {
+		if (mask->vlan_id || mask->vlan_priority) {
 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);
 
 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
+
+			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
 		}
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index c55ad8d..d239f5d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -57,7 +57,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	if (esw->mode != SRIOV_OFFLOADS)
 		return ERR_PTR(-EOPNOTSUPP);
 
-	action = attr->action;
+	/* Per-flow vlan pop/push is emulated; don't program it into the firmware. */
+	action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
 
 	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
 		dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 8969604..914e546 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1690,7 +1690,7 @@ static int init_root_ns(struct mlx5_flow_steering *steering)
 {
 
 	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
-	if (IS_ERR_OR_NULL(steering->root_ns))
+	if (!steering->root_ns)
 		goto cleanup;
 
 	if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index d5433c4..3eb9315 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1226,6 +1226,9 @@ static int init_one(struct pci_dev *pdev,
 
 	pci_set_drvdata(pdev, dev);
 
+	dev->pdev = pdev;
+	dev->event = mlx5_core_event;
+
 	if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) {
 		mlx5_core_warn(dev,
 			       "selected profile out of range, selecting default (%d)\n",
@@ -1233,8 +1236,6 @@ static int init_one(struct pci_dev *pdev,
 		prof_sel = MLX5_DEFAULT_PROF;
 	}
 	dev->profile = &profile[prof_sel];
-	dev->pdev = pdev;
-	dev->event = mlx5_core_event;
 
 	INIT_LIST_HEAD(&priv->ctx_list);
 	spin_lock_init(&priv->ctx_lock);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 1ec0a4c..dda5761 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -231,7 +231,7 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
 
 	span_entry->used = true;
 	span_entry->id = index;
-	span_entry->ref_count = 0;
+	span_entry->ref_count = 1;
 	span_entry->local_port = local_port;
 	return span_entry;
 }
@@ -270,6 +270,7 @@ static struct mlxsw_sp_span_entry
 
 	span_entry = mlxsw_sp_span_entry_find(port);
 	if (span_entry) {
+		/* Already exists, just take a reference */
 		span_entry->ref_count++;
 		return span_entry;
 	}
@@ -280,6 +281,7 @@ static struct mlxsw_sp_span_entry
 static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
 				   struct mlxsw_sp_span_entry *span_entry)
 {
+	WARN_ON(!span_entry->ref_count);
 	if (--span_entry->ref_count == 0)
 		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
 	return 0;
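
Initializing ref_count to 1 at creation makes the creation itself hand
back a reference, so only the lookup path increments and every get is
balanced by exactly one put; the old ref_count = 0 start left the
creator's reference uncounted. A minimal sketch of the get/put
lifecycle (types are hypothetical):

#include <stdio.h>

struct span_entry {
        int used;
        unsigned int ref_count;
};

static struct span_entry pool[8];

static struct span_entry *span_get(int index)
{
        struct span_entry *e = &pool[index];

        if (e->used) {
                e->ref_count++;         /* existing entry: take a reference */
                return e;
        }
        e->used = 1;
        e->ref_count = 1;               /* creation hands back one reference */
        return e;
}

static void span_put(struct span_entry *e)
{
        if (--e->ref_count == 0)
                e->used = 0;            /* last put destroys the entry */
}

int main(void)
{
        struct span_entry *e = span_get(0);

        span_put(e);
        printf("used=%d\n", pool[0].used);
        return 0;
}
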
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 9b22863..97bbc1d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -115,7 +115,7 @@ struct mlxsw_sp_rif {
 struct mlxsw_sp_mid {
 	struct list_head list;
 	unsigned char addr[ETH_ALEN];
-	u16 vid;
+	u16 fid;
 	u16 mid;
 	unsigned int ref_count;
 };
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 4573da2..e83072d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -594,21 +594,22 @@ static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
 	return 0;
 }
 
+static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
+
 static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
 {
+	mlxsw_sp_router_fib_flush(mlxsw_sp);
 	kfree(mlxsw_sp->router.vrs);
 }
 
 struct mlxsw_sp_neigh_key {
-	unsigned char addr[sizeof(struct in6_addr)];
-	struct net_device *dev;
+	struct neighbour *n;
 };
 
 struct mlxsw_sp_neigh_entry {
 	struct rhash_head ht_node;
 	struct mlxsw_sp_neigh_key key;
 	u16 rif;
-	struct neighbour *n;
 	bool offloaded;
 	struct delayed_work dw;
 	struct mlxsw_sp_port *mlxsw_sp_port;
@@ -646,19 +647,15 @@ mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
 static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work);
 
 static struct mlxsw_sp_neigh_entry *
-mlxsw_sp_neigh_entry_create(const void *addr, size_t addr_len,
-			    struct net_device *dev, u16 rif,
-			    struct neighbour *n)
+mlxsw_sp_neigh_entry_create(struct neighbour *n, u16 rif)
 {
 	struct mlxsw_sp_neigh_entry *neigh_entry;
 
 	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC);
 	if (!neigh_entry)
 		return NULL;
-	memcpy(neigh_entry->key.addr, addr, addr_len);
-	neigh_entry->key.dev = dev;
+	neigh_entry->key.n = n;
 	neigh_entry->rif = rif;
-	neigh_entry->n = n;
 	INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw);
 	INIT_LIST_HEAD(&neigh_entry->nexthop_list);
 	return neigh_entry;
@@ -671,13 +668,11 @@ mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp_neigh_entry *neigh_entry)
 }
 
 static struct mlxsw_sp_neigh_entry *
-mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, const void *addr,
-			    size_t addr_len, struct net_device *dev)
+mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
 {
-	struct mlxsw_sp_neigh_key key = {{ 0 } };
+	struct mlxsw_sp_neigh_key key;
 
-	memcpy(key.addr, addr, addr_len);
-	key.dev = dev;
+	key.n = n;
 	return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht,
 				      &key, mlxsw_sp_neigh_ht_params);
 }
@@ -689,26 +684,20 @@ int mlxsw_sp_router_neigh_construct(struct net_device *dev,
 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	struct mlxsw_sp_neigh_entry *neigh_entry;
 	struct mlxsw_sp_rif *r;
-	u32 dip;
 	int err;
 
 	if (n->tbl != &arp_tbl)
 		return 0;
 
-	dip = ntohl(*((__be32 *) n->primary_key));
-	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip),
-						  n->dev);
-	if (neigh_entry) {
-		WARN_ON(neigh_entry->n != n);
+	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
+	if (neigh_entry)
 		return 0;
-	}
 
 	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
 	if (WARN_ON(!r))
 		return -EINVAL;
 
-	neigh_entry = mlxsw_sp_neigh_entry_create(&dip, sizeof(dip), n->dev,
-						  r->rif, n);
+	neigh_entry = mlxsw_sp_neigh_entry_create(n, r->rif);
 	if (!neigh_entry)
 		return -ENOMEM;
 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
@@ -727,14 +716,11 @@ void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	struct mlxsw_sp_neigh_entry *neigh_entry;
-	u32 dip;
 
 	if (n->tbl != &arp_tbl)
 		return;
 
-	dip = ntohl(*((__be32 *) n->primary_key));
-	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip),
-						  n->dev);
+	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
 	if (!neigh_entry)
 		return;
 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
@@ -817,6 +803,26 @@ static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
 	}
 }
 
+static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
+{
+	u8 num_rec, last_rec_index, num_entries;
+
+	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
+	last_rec_index = num_rec - 1;
+
+	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
+		return false;
+	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
+	    MLXSW_REG_RAUHTD_TYPE_IPV6)
+		return true;
+
+	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
+								last_rec_index);
+	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
+		return true;
+	return false;
+}
+
 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
 {
 	char *rauhtd_pl;
@@ -843,7 +849,7 @@ static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
 		for (i = 0; i < num_rec; i++)
 			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
 							  i);
-	} while (num_rec);
+	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
 	rtnl_unlock();
 
 	kfree(rauhtd_pl);
@@ -862,7 +868,7 @@ static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
 		 * is active regardless of the traffic.
 		 */
 		if (!list_empty(&neigh_entry->nexthop_list))
-			neigh_event_send(neigh_entry->n, NULL);
+			neigh_event_send(neigh_entry->key.n, NULL);
 	}
 	rtnl_unlock();
 }
@@ -908,9 +914,9 @@ static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
 	rtnl_lock();
 	list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
 			    nexthop_neighs_list_node) {
-		if (!(neigh_entry->n->nud_state & NUD_VALID) &&
+		if (!(neigh_entry->key.n->nud_state & NUD_VALID) &&
 		    !list_empty(&neigh_entry->nexthop_list))
-			neigh_event_send(neigh_entry->n, NULL);
+			neigh_event_send(neigh_entry->key.n, NULL);
 	}
 	rtnl_unlock();
 
@@ -927,7 +933,7 @@ static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
 {
 	struct mlxsw_sp_neigh_entry *neigh_entry =
 		container_of(work, struct mlxsw_sp_neigh_entry, dw.work);
-	struct neighbour *n = neigh_entry->n;
+	struct neighbour *n = neigh_entry->key.n;
 	struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port;
 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
@@ -1030,11 +1036,8 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
 
 		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 		dip = ntohl(*((__be32 *) n->primary_key));
-		neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp,
-							  &dip,
-							  sizeof(__be32),
-							  dev);
-		if (WARN_ON(!neigh_entry) || WARN_ON(neigh_entry->n != n)) {
+		neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
+		if (WARN_ON(!neigh_entry)) {
 			mlxsw_sp_port_dev_put(mlxsw_sp_port);
 			return NOTIFY_DONE;
 		}
@@ -1343,33 +1346,26 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
 				 struct fib_nh *fib_nh)
 {
 	struct mlxsw_sp_neigh_entry *neigh_entry;
-	u32 gwip = ntohl(fib_nh->nh_gw);
 	struct net_device *dev = fib_nh->nh_dev;
 	struct neighbour *n;
 	u8 nud_state;
 
-	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip,
-						  sizeof(gwip), dev);
-	if (!neigh_entry) {
-		__be32 gwipn = htonl(gwip);
-
-		n = neigh_create(&arp_tbl, &gwipn, dev);
+	/* Take a reference on the neighbour here, ensuring that it is
+	 * not destructed before the nexthop entry is finished.
+	 * The reference is taken either in neigh_lookup() or
+	 * in neigh_create() when n is not found.
+	 */
+	n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, dev);
+	if (!n) {
+		n = neigh_create(&arp_tbl, &fib_nh->nh_gw, dev);
 		if (IS_ERR(n))
 			return PTR_ERR(n);
 		neigh_event_send(n, NULL);
-		neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip,
-							  sizeof(gwip), dev);
-		if (!neigh_entry) {
-			neigh_release(n);
-			return -EINVAL;
-		}
-	} else {
-		/* Take a reference of neigh here ensuring that neigh would
-		 * not be detructed before the nexthop entry is finished.
-		 * The second branch takes the reference in neith_create()
-		 */
-		n = neigh_entry->n;
-		neigh_clone(n);
+	}
+	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
+	if (!neigh_entry) {
+		neigh_release(n);
+		return -EINVAL;
 	}
 
 	/* If that is the first nexthop connected to that neigh, add to
@@ -1403,7 +1399,7 @@ static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
 	if (list_empty(&nh->neigh_entry->nexthop_list))
 		list_del(&nh->neigh_entry->nexthop_neighs_list_node);
 
-	neigh_release(neigh_entry->n);
+	neigh_release(neigh_entry->key.n);
 }
 
 static struct mlxsw_sp_nexthop_group *
@@ -1463,11 +1459,11 @@ static bool mlxsw_sp_nexthop_match(struct mlxsw_sp_nexthop *nh,
 
 	for (i = 0; i < fi->fib_nhs; i++) {
 		struct fib_nh *fib_nh = &fi->fib_nh[i];
-		u32 gwip = ntohl(fib_nh->nh_gw);
+		struct neighbour *n = nh->neigh_entry->key.n;
 
-		if (memcmp(nh->neigh_entry->key.addr,
-			   &gwip, sizeof(u32)) == 0 &&
-		    nh->neigh_entry->key.dev == fib_nh->nh_dev)
+		if (memcmp(n->primary_key, &fib_nh->nh_gw,
+			   sizeof(fib_nh->nh_gw)) == 0 &&
+		    n->dev == fib_nh->nh_dev)
 			return true;
 	}
 	return false;
@@ -1874,18 +1870,18 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
 }
 
-static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
+static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
 {
 	struct mlxsw_resources *resources;
 	struct mlxsw_sp_fib_entry *fib_entry;
 	struct mlxsw_sp_fib_entry *tmp;
 	struct mlxsw_sp_vr *vr;
 	int i;
-	int err;
 
 	resources = mlxsw_core_resources_get(mlxsw_sp->core);
 	for (i = 0; i < resources->max_virtual_routers; i++) {
 		vr = &mlxsw_sp->router.vrs[i];
+
 		if (!vr->used)
 			continue;
 
@@ -1901,6 +1897,13 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
 				break;
 		}
 	}
+}
+
+static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
+{
+	int err;
+
+	mlxsw_sp_router_fib_flush(mlxsw_sp);
 	mlxsw_sp->router.aborted = true;
 	err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
 	if (err)
@@ -1958,6 +1961,9 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
 	struct fib_entry_notifier_info *fen_info = ptr;
 	int err;
 
+	if (!net_eq(fen_info->info.net, &init_net))
+		return NOTIFY_DONE;
+
 	switch (event) {
 	case FIB_EVENT_ENTRY_ADD:
 		err = mlxsw_sp_router_fib4_add(mlxsw_sp, fen_info);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 5e00c79..1e2c8ec 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -929,12 +929,12 @@ static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
 
 static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
 					      const unsigned char *addr,
-					      u16 vid)
+					      u16 fid)
 {
 	struct mlxsw_sp_mid *mid;
 
 	list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
-		if (ether_addr_equal(mid->addr, addr) && mid->vid == vid)
+		if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
 			return mid;
 	}
 	return NULL;
@@ -942,7 +942,7 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
 
 static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
 						const unsigned char *addr,
-						u16 vid)
+						u16 fid)
 {
 	struct mlxsw_sp_mid *mid;
 	u16 mid_idx;
@@ -958,7 +958,7 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
 
 	set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
 	ether_addr_copy(mid->addr, addr);
-	mid->vid = vid;
+	mid->fid = fid;
 	mid->mid = mid_idx;
 	mid->ref_count = 0;
 	list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);
@@ -991,9 +991,9 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
 	if (switchdev_trans_ph_prepare(trans))
 		return 0;
 
-	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
+	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
 	if (!mid) {
-		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid);
+		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid);
 		if (!mid) {
 			netdev_err(dev, "Unable to allocate MC group\n");
 			return -ENOMEM;
@@ -1137,7 +1137,7 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
 	u16 mid_idx;
 	int err = 0;
 
-	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
+	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
 	if (!mid) {
 		netdev_err(dev, "Unable to remove port from MC DB\n");
 		return -EINVAL;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 72eee29..2777d5b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -727,9 +727,6 @@ struct core_tx_bd_flags {
 #define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT	6
 #define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK	0x1
 #define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
-#define CORE_TX_BD_FLAGS_ROCE_FLAV_MASK		0x1
-#define CORE_TX_BD_FLAGS_ROCE_FLAV_SHIFT	12
-
 };
 
 struct core_tx_bd {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 63e1a1b..f95385c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -1119,6 +1119,7 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
 	start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
 	    CORE_TX_BD_FLAGS_START_BD_SHIFT;
 	SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
+	SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type);
 	DMA_REGPAIR_LE(start_bd->addr, first_frag);
 	start_bd->nbytes = cpu_to_le16(first_frag_len);
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index c418360..333c744 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -839,20 +839,19 @@ static void qed_update_pf_params(struct qed_dev *cdev,
 {
 	int i;
 
+	if (IS_ENABLED(CONFIG_QED_RDMA)) {
+		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
+		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
+		/* Divide the MRs by 3 to avoid MF ILT overflow. */
+		params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
+		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
+	}
+
 	for (i = 0; i < cdev->num_hwfns; i++) {
 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
 		p_hwfn->pf_params = *params;
 	}
-
-	if (!IS_ENABLED(CONFIG_QED_RDMA))
-		return;
-
-	params->rdma_pf_params.num_qps = QED_ROCE_QPS;
-	params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
-	/* divide by 3 the MRs to avoid MF ILT overflow */
-	params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
-	params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
 }
 
 static int qed_slowpath_start(struct qed_dev *cdev,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 12251a1..7567cc4 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -175,16 +175,23 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
 	for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) {
 		int tc;
 
-		for (j = 0; j < QEDE_NUM_RQSTATS; j++)
-			sprintf(buf + (k + j) * ETH_GSTRING_LEN,
-				"%d:   %s", i, qede_rqstats_arr[j].string);
-		k += QEDE_NUM_RQSTATS;
-		for (tc = 0; tc < edev->num_tc; tc++) {
-			for (j = 0; j < QEDE_NUM_TQSTATS; j++)
+		if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
+			for (j = 0; j < QEDE_NUM_RQSTATS; j++)
 				sprintf(buf + (k + j) * ETH_GSTRING_LEN,
-					"%d.%d: %s", i, tc,
-					qede_tqstats_arr[j].string);
-			k += QEDE_NUM_TQSTATS;
+					"%d:   %s", i,
+					qede_rqstats_arr[j].string);
+			k += QEDE_NUM_RQSTATS;
+		}
+
+		if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
+			for (tc = 0; tc < edev->num_tc; tc++) {
+				for (j = 0; j < QEDE_NUM_TQSTATS; j++)
+					sprintf(buf + (k + j) *
+						ETH_GSTRING_LEN,
+						"%d.%d: %s", i, tc,
+						qede_tqstats_arr[j].string);
+				k += QEDE_NUM_TQSTATS;
+			}
 		}
 	}
 
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 7def29a..85f46db 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -2839,7 +2839,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
 		}
 
 		mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
-				       rxq->rx_buf_size, DMA_FROM_DEVICE);
+				       PAGE_SIZE, DMA_FROM_DEVICE);
 		if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
 			DP_NOTICE(edev,
 				  "Failed to map TPA replacement buffer\n");
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 6fb3bee..0b4deb3 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -575,10 +575,11 @@ void emac_mac_start(struct emac_adapter *adpt)
 
 	mac |= TXEN | RXEN;     /* enable RX/TX */
 
-	/* We don't have ethtool support yet, so force flow-control mode
-	 * to 'full' always.
-	 */
-	mac |= TXFC | RXFC;
+	/* Configure MAC flow control to match the PHY's settings. */
+	if (phydev->pause)
+		mac |= RXFC;
+	if (phydev->pause != phydev->asym_pause)
+		mac |= TXFC;
 
 	/* setup link speed */
 	mac &= ~SPEED_MASK;
@@ -1003,6 +1004,12 @@ int emac_mac_up(struct emac_adapter *adpt)
 	writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS);
 	writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK);
 
+	/* Enable pause frames.  Without this feature, the EMAC has been shown
+	 * to receive (and drop) frames with FCS errors at gigabit connections.
+	 */
+	adpt->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+	adpt->phydev->advertising |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
 	adpt->phydev->irq = PHY_IGNORE_INTERRUPT;
 	phy_start(adpt->phydev);
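
The emac_mac_start() hunk derives MAC flow control from the PHY's
negotiated pause bits instead of forcing full flow control: pause
enables receive pause handling, and a mismatch between pause and
asym_pause enables transmit pause. A truth-table sketch of that
resolution (the register bits are hypothetical):

#include <stdio.h>

#define RXFC 0x1u
#define TXFC 0x2u

static unsigned int flow_ctrl(int pause, int asym_pause)
{
        unsigned int mac = 0;

        if (pause)
                mac |= RXFC;
        if (pause != asym_pause)
                mac |= TXFC;
        return mac;
}

int main(void)
{
        /* symmetric pause -> RX+TX; pause with asym set -> RX only */
        printf("sym=%#x asym=%#x txonly=%#x\n",
               flow_ctrl(1, 0), flow_ctrl(1, 1), flow_ctrl(0, 1));
        return 0;
}
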
 
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
index 75c1b53..72fe343 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
@@ -421,7 +421,7 @@ static const struct emac_reg_write sgmii_v2_laned[] = {
 	/* CDR Settings */
 	{EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0,
 		UCDR_STEP_BY_TWO_MODE0 | UCDR_xO_GAIN_MODE(10)},
-	{EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(6)},
+	{EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(0)},
 	{EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | UCDR_SO_SATURATION(12)},
 
 	/* TX/RX Settings */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 3cf3557..6b89e4a 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -485,6 +485,9 @@ efx_copy_channel(const struct efx_channel *old_channel)
 	*channel = *old_channel;
 
 	channel->napi_dev = NULL;
+	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
+	channel->napi_str.napi_id = 0;
+	channel->napi_str.state = 0;
 	memset(&channel->eventq, 0, sizeof(channel->eventq));
 
 	for (j = 0; j < EFX_TXQ_TYPES; j++) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 48e71fa..e2c94ec 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -880,6 +880,13 @@ static int stmmac_init_phy(struct net_device *dev)
 		return -ENODEV;
 	}
 
+	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
+	 * subsequent PHY polling; starting from PHY_POLL ensures we force a
+	 * link transition if we have an UP/DOWN/UP sequence
+	 */
+	if (phydev->is_pseudo_fixed_link)
+		phydev->irq = PHY_POLL;
+
 	pr_debug("stmmac_init_phy:  %s: attached to PHY (UID 0x%x)"
 		 " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
 
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index 054a8dd..ba1e45f 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -176,9 +176,12 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
 	}
 
 	dev = bus_find_device(&platform_bus_type, NULL, node, match);
+	of_node_put(node);
 	priv = dev_get_drvdata(dev);
 
 	priv->cpsw_phy_sel(priv, phy_mode, slave);
+
+	put_device(dev);
 }
 EXPORT_SYMBOL_GPL(cpsw_phy_sel);
 
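This hunk, and the davinci_emac and phy_device.c hunks below, fix the same class of leak: bus_find_device() returns its result with a reference held, and of_parse_phandle() does the same for device nodes, so every successful lookup must be balanced by put_device() or of_node_put(). A minimal sketch of the discipline (the bus, match callback and payload are hypothetical):

#include <linux/device.h>
#include <linux/errno.h>

static int use_device_once(struct bus_type *bus,
			   int (*match)(struct device *dev, void *data),
			   void *data)
{
	struct device *dev;

	dev = bus_find_device(bus, NULL, data, match);	/* takes a ref */
	if (!dev)
		return -ENODEV;

	/* ... use dev_get_drvdata(dev) while the reference is held ... */

	put_device(dev);	/* balance the bus_find_device() reference */
	return 0;
}
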
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 2fd94a5..84fbe571 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1410,6 +1410,7 @@ static int emac_dev_open(struct net_device *ndev)
 	int i = 0;
 	struct emac_priv *priv = netdev_priv(ndev);
 	struct phy_device *phydev = NULL;
+	struct device *phy = NULL;
 
 	ret = pm_runtime_get_sync(&priv->pdev->dev);
 	if (ret < 0) {
@@ -1488,19 +1489,20 @@ static int emac_dev_open(struct net_device *ndev)
 
 	/* use the first phy on the bus if pdata did not give us a phy id */
 	if (!phydev && !priv->phy_id) {
-		struct device *phy;
-
 		phy = bus_find_device(&mdio_bus_type, NULL, NULL,
 				      match_first_device);
-		if (phy)
+		if (phy) {
 			priv->phy_id = dev_name(phy);
+			if (!priv->phy_id || !*priv->phy_id)
+				put_device(phy);
+		}
 	}
 
 	if (!phydev && priv->phy_id && *priv->phy_id) {
 		phydev = phy_connect(ndev, priv->phy_id,
 				     &emac_adjust_link,
 				     PHY_INTERFACE_MODE_MII);
-
+		put_device(phy);	/* reference taken by bus_find_device */
 		if (IS_ERR(phydev)) {
 			dev_err(emac_dev, "could not connect to phy %s\n",
 				priv->phy_id);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index 446ea58..928c1dc 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -1694,7 +1694,7 @@ struct gelic_wl_scan_info *gelic_wl_find_best_bss(struct gelic_wl_info *wl)
 				pr_debug("%s: bssid matched\n", __func__);
 				break;
 			} else {
-				pr_debug("%s: bssid unmached\n", __func__);
+				pr_debug("%s: bssid unmatched\n", __func__);
 				continue;
 			}
 		}
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 7f127dc..fa32391 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -708,8 +708,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
 			if (!qmgr_stat_below_low_watermark(rxq) &&
 			    napi_reschedule(napi)) { /* not empty again */
 #if DEBUG_RX
-				printk(KERN_DEBUG "%s: eth_poll"
-				       " napi_reschedule successed\n",
+				printk(KERN_DEBUG "%s: eth_poll napi_reschedule succeeded\n",
 				       dev->name);
 #endif
 				qmgr_disable_irq(rxq);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 3234fcd..d2d6f12 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1278,6 +1278,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 	struct net_device *lowerdev;
 	int err;
 	int macmode;
+	bool create = false;
 
 	if (!tb[IFLA_LINK])
 		return -EINVAL;
@@ -1304,12 +1305,18 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 		err = macvlan_port_create(lowerdev);
 		if (err < 0)
 			return err;
+		create = true;
 	}
 	port = macvlan_port_get_rtnl(lowerdev);
 
 	/* Only 1 macvlan device can be created in passthru mode */
-	if (port->passthru)
-		return -EINVAL;
+	if (port->passthru) {
+		/* The macvlan port was not created by this call, but we
+		 * still goto destroy_macvlan_port for readability.
+		 */
+		err = -EINVAL;
+		goto destroy_macvlan_port;
+	}
 
 	vlan->lowerdev = lowerdev;
 	vlan->dev      = dev;
@@ -1325,24 +1332,28 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 		vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
 
 	if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
-		if (port->count)
-			return -EINVAL;
+		if (port->count) {
+			err = -EINVAL;
+			goto destroy_macvlan_port;
+		}
 		port->passthru = true;
 		eth_hw_addr_inherit(dev, lowerdev);
 	}
 
 	if (data && data[IFLA_MACVLAN_MACADDR_MODE]) {
-		if (vlan->mode != MACVLAN_MODE_SOURCE)
-			return -EINVAL;
+		if (vlan->mode != MACVLAN_MODE_SOURCE) {
+			err = -EINVAL;
+			goto destroy_macvlan_port;
+		}
 		macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]);
 		err = macvlan_changelink_sources(vlan, macmode, data);
 		if (err)
-			return err;
+			goto destroy_macvlan_port;
 	}
 
 	err = register_netdevice(dev);
 	if (err < 0)
-		return err;
+		goto destroy_macvlan_port;
 
 	dev->priv_flags |= IFF_MACVLAN;
 	err = netdev_upper_dev_link(lowerdev, dev);
@@ -1357,7 +1368,9 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 
 unregister_netdev:
 	unregister_netdevice(dev);
-
+destroy_macvlan_port:
+	if (create)
+		macvlan_port_destroy(port->dev);
 	return err;
 }
 EXPORT_SYMBOL_GPL(macvlan_common_newlink);
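
The error-path rework above is the standard "creator cleans up" pattern: record whether this call created the shared port, funnel every failure through one label, and tear the port down only when this call was the creator, so a pre-existing port owned by another macvlan is never destroyed. Condensed (types and helpers hypothetical):

#include <stdbool.h>

struct port;
struct lowerdev { struct port *port; };

int port_create(struct lowerdev *low);		/* hypothetical helpers */
void port_destroy(struct port *port);
int do_setup(struct port *port);

int attach_or_create(struct lowerdev *low)
{
	bool create = false;
	int err;

	if (!low->port) {
		err = port_create(low);
		if (err)
			return err;
		create = true;
	}

	err = do_setup(low->port);
	if (err)
		goto destroy_port;
	return 0;

destroy_port:
	if (create)		/* only undo what this call created */
		port_destroy(low->port);
	return err;
}
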
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index e977ba9..1a4bf8a 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -723,6 +723,7 @@ struct phy_device *phy_connect(struct net_device *dev, const char *bus_id,
 	phydev = to_phy_device(d);
 
 	rc = phy_connect_direct(dev, phydev, handler, interface);
+	put_device(d);
 	if (rc)
 		return ERR_PTR(rc);
 
@@ -953,6 +954,7 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
 	phydev = to_phy_device(d);
 
 	rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface);
+	put_device(d);
 	if (rc)
 		return ERR_PTR(rc);
 
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index e6338c1..8a6675d 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1656,6 +1656,19 @@ static const struct driver_info ax88178a_info = {
 	.tx_fixup = ax88179_tx_fixup,
 };
 
+static const struct driver_info cypress_GX3_info = {
+	.description = "Cypress GX3 SuperSpeed to Gigabit Ethernet Controller",
+	.bind = ax88179_bind,
+	.unbind = ax88179_unbind,
+	.status = ax88179_status,
+	.link_reset = ax88179_link_reset,
+	.reset = ax88179_reset,
+	.stop = ax88179_stop,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.rx_fixup = ax88179_rx_fixup,
+	.tx_fixup = ax88179_tx_fixup,
+};
+
 static const struct driver_info dlink_dub1312_info = {
 	.description = "D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter",
 	.bind = ax88179_bind,
@@ -1718,6 +1731,10 @@ static const struct usb_device_id products[] = {
 	USB_DEVICE(0x0b95, 0x178a),
 	.driver_info = (unsigned long)&ax88178a_info,
 }, {
+	/* Cypress GX3 SuperSpeed to Gigabit Ethernet Bridge Controller */
+	USB_DEVICE(0x04b4, 0x3610),
+	.driver_info = (unsigned long)&cypress_GX3_info,
+}, {
 	/* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */
 	USB_DEVICE(0x2001, 0x4a00),
 	.driver_info = (unsigned long)&dlink_dub1312_info,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 44d439f..efb84f0 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1730,7 +1730,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
 	u8 checksum = CHECKSUM_NONE;
 	u32 opts2, opts3;
 
-	if (tp->version == RTL_VER_01)
+	if (tp->version == RTL_VER_01 || tp->version == RTL_VER_02)
 		goto return_result;
 
 	opts2 = le32_to_cpu(rx_desc->opts2);
@@ -1745,7 +1745,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
 			checksum = CHECKSUM_NONE;
 		else
 			checksum = CHECKSUM_UNNECESSARY;
-	} else if (RD_IPV6_CS) {
+	} else if (opts2 & RD_IPV6_CS) {
 		if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF))
 			checksum = CHECKSUM_UNNECESSARY;
 		else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF))
@@ -3266,10 +3266,8 @@ static int rtl8152_open(struct net_device *netdev)
 		goto out;
 
 	res = usb_autopm_get_interface(tp->intf);
-	if (res < 0) {
-		free_all_mem(tp);
-		goto out;
-	}
+	if (res < 0)
+		goto out_free;
 
 	mutex_lock(&tp->control);
 
@@ -3285,10 +3283,9 @@ static int rtl8152_open(struct net_device *netdev)
 			netif_device_detach(tp->netdev);
 		netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
 			   res);
-		free_all_mem(tp);
-	} else {
-		napi_enable(&tp->napi);
+		goto out_unlock;
 	}
+	napi_enable(&tp->napi);
 
 	mutex_unlock(&tp->control);
 
@@ -3297,7 +3294,13 @@ static int rtl8152_open(struct net_device *netdev)
 	tp->pm_notifier.notifier_call = rtl_notifier;
 	register_pm_notifier(&tp->pm_notifier);
 #endif
+	return 0;
 
+out_unlock:
+	mutex_unlock(&tp->control);
+	usb_autopm_put_interface(tp->intf);
+out_free:
+	free_all_mem(tp);
 out:
 	return res;
 }
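
rtl8152_open() now unwinds with stacked labels: each later failure jumps to the label that releases everything acquired so far, in reverse order, so each piece of cleanup exists exactly once and the mutex/autopm/memory states can no longer get out of sync. The skeleton of the shape (step functions hypothetical):

int alloc_mem(void);		/* hypothetical acquisition steps */
int get_interface(void);
int start_device(void);
void put_interface(void);
void free_mem(void);

int open_example(void)
{
	int res;

	res = alloc_mem();
	if (res < 0)
		goto out;

	res = get_interface();
	if (res < 0)
		goto out_free;

	res = start_device();
	if (res < 0)
		goto out_put;

	return 0;

out_put:
	put_interface();
out_free:
	free_mem();
out:
	return res;
}
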
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index fad84f3..fd8b1e6 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2038,23 +2038,33 @@ static struct virtio_device_id id_table[] = {
 	{ 0 },
 };
 
+#define VIRTNET_FEATURES \
+	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
+	VIRTIO_NET_F_MAC, \
+	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
+	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
+	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
+	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
+	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
+	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
+	VIRTIO_NET_F_CTRL_MAC_ADDR, \
+	VIRTIO_NET_F_MTU
+
 static unsigned int features[] = {
-	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
-	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
-	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
-	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
-	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
-	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
-	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
-	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
-	VIRTIO_NET_F_CTRL_MAC_ADDR,
+	VIRTNET_FEATURES,
+};
+
+static unsigned int features_legacy[] = {
+	VIRTNET_FEATURES,
+	VIRTIO_NET_F_GSO,
 	VIRTIO_F_ANY_LAYOUT,
-	VIRTIO_NET_F_MTU,
 };
 
 static struct virtio_driver virtio_net_driver = {
 	.feature_table = features,
 	.feature_table_size = ARRAY_SIZE(features),
+	.feature_table_legacy = features_legacy,
+	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
 	.driver.name =	KBUILD_MODNAME,
 	.driver.owner =	THIS_MODULE,
 	.id_table =	id_table,
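
Factoring the common bits into VIRTNET_FEATURES means the modern and legacy feature tables can no longer drift apart; legacy-only bits (VIRTIO_NET_F_GSO, VIRTIO_F_ANY_LAYOUT) are appended solely to features_legacy. The same macro trick in miniature:

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))
#define COMMON_FEATURES	1, 2, 3		/* stand-ins for VIRTIO_NET_F_* bits */

static unsigned int features[] = { COMMON_FEATURES };
static unsigned int features_legacy[] = { COMMON_FEATURES, 4, 5 };

int main(void)
{
	printf("%zu modern, %zu legacy features\n",
	       ARRAY_SIZE(features), ARRAY_SIZE(features_legacy));
	return 0;
}
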
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index f3c2fa3..24532cd 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -944,7 +944,9 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
 {
 	struct vxlan_dev *vxlan;
 	struct vxlan_sock *sock4;
-	struct vxlan_sock *sock6 = NULL;
+#if IS_ENABLED(CONFIG_IPV6)
+	struct vxlan_sock *sock6;
+#endif
 	unsigned short family = dev->default_dst.remote_ip.sa.sa_family;
 
 	sock4 = rtnl_dereference(dev->vn4_sock);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index b777e1b..78d9966 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -4516,7 +4516,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
 	/* store current 11d setting */
 	if (brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY,
 				  &ifp->vif->is_11d)) {
-		supports_11d = false;
+		is_11d = supports_11d = false;
 	} else {
 		country_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
 					      settings->beacon.tail_len,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 4fdc3da..b88e204 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1087,6 +1087,15 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
 		ret = iwl_mvm_switch_to_d3(mvm);
 		if (ret)
 			return ret;
+	} else {
+		/* In theory, we wouldn't have to stop a running sched
+		 * scan in order to start another one (for
+		 * net-detect).  But in practice this doesn't seem to
+		 * work properly, so stop any running sched_scan now.
+		 */
+		ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
+		if (ret)
+			return ret;
 	}
 
 	/* rfkill release can be either for wowlan or netdetect */
@@ -1254,7 +1263,10 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
  out:
 	if (ret < 0) {
 		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
-		ieee80211_restart_hw(mvm->hw);
+		if (mvm->restart_fw > 0) {
+			mvm->restart_fw--;
+			ieee80211_restart_hw(mvm->hw);
+		}
 		iwl_mvm_free_nd(mvm);
 	}
  out_noreset:
@@ -2088,6 +2100,16 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
 	iwl_mvm_update_changed_regdom(mvm);
 
 	if (mvm->net_detect) {
+		/* If this is a non-unified image, we restart the FW,
+		 * so no need to stop the netdetect scan.  If that
+		 * fails, continue and try to get the wake-up reasons,
+		 * but trigger a HW restart by keeping a failure code
+		 * in ret.
+		 */
+		if (unified_image)
+			ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
+						false);
+
 		iwl_mvm_query_netdetect_reasons(mvm, vif);
 		/* has unlocked the mutex, so skip that */
 		goto out;
@@ -2271,7 +2293,8 @@ static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
 static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
 {
 	struct iwl_mvm *mvm = inode->i_private;
-	int remaining_time = 10;
+	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
 
 	mvm->d3_test_active = false;
 
@@ -2282,18 +2305,22 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
 	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
 
 	iwl_abort_notification_waits(&mvm->notif_wait);
-	ieee80211_restart_hw(mvm->hw);
+	if (!unified_image) {
+		int remaining_time = 10;
 
-	/* wait for restart and disconnect all interfaces */
-	while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
-	       remaining_time > 0) {
-		remaining_time--;
-		msleep(1000);
+		ieee80211_restart_hw(mvm->hw);
+
+		/* wait for restart and disconnect all interfaces */
+		while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+		       remaining_time > 0) {
+			remaining_time--;
+			msleep(1000);
+		}
+
+		if (remaining_time == 0)
+			IWL_ERR(mvm, "Timed out waiting for HW restart!\n");
 	}
 
-	if (remaining_time == 0)
-		IWL_ERR(mvm, "Timed out waiting for HW restart to finish!\n");
-
 	ieee80211_iterate_active_interfaces_atomic(
 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
 		iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 07da4ef..7b7d2a1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1529,8 +1529,8 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
 		.data = { &cmd, },
 		.len = { sizeof(cmd) },
 	};
-	size_t delta, len;
-	ssize_t ret;
+	size_t delta;
+	ssize_t ret, len;
 
 	hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
 			     DEBUG_GROUP, 0);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 318efd8..1db1dc1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -4121,7 +4121,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
 				     struct iwl_mvm_internal_rxq_notif *notif,
 				     u32 size)
 {
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq);
 	u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
 	int ret;
 
@@ -4143,7 +4142,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
 	}
 
 	if (notif->sync)
-		ret = wait_event_timeout(notif_waitq,
+		ret = wait_event_timeout(mvm->rx_sync_waitq,
 					 atomic_read(&mvm->queue_sync_counter) == 0,
 					 HZ);
 	WARN_ON_ONCE(!ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index d17cbf6..c60703e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -937,6 +937,7 @@ struct iwl_mvm {
 	/* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
 	spinlock_t d0i3_tx_lock;
 	wait_queue_head_t d0i3_exit_waitq;
+	wait_queue_head_t rx_sync_waitq;
 
 	/* BT-Coex */
 	struct iwl_bt_coex_profile_notif last_bt_notif;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 05fe6dd..4d35deb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -619,6 +619,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	spin_lock_init(&mvm->refs_lock);
 	skb_queue_head_init(&mvm->d0i3_tx);
 	init_waitqueue_head(&mvm->d0i3_exit_waitq);
+	init_waitqueue_head(&mvm->rx_sync_waitq);
 
 	atomic_set(&mvm->queue_sync_counter, 0);
 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index a57c6ef..6c802ce 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -547,7 +547,8 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
 				  "Received expired RX queue sync message\n");
 			return;
 		}
-		atomic_dec(&mvm->queue_sync_counter);
+		if (!atomic_dec_return(&mvm->queue_sync_counter))
+			wake_up(&mvm->rx_sync_waitq);
 	}
 
 	switch (internal_notif->type) {
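
Across mac80211.c, mvm.h, ops.c and rxmq.c above, an on-stack wait queue (which the notification handler had no way to reach) is replaced by rx_sync_waitq shared through struct iwl_mvm: the waiter sleeps until queue_sync_counter drains to zero, and the handler wakes it on the final decrement. The pattern in isolation:

#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>

static DECLARE_WAIT_QUEUE_HEAD(sync_waitq);
static atomic_t pending;

static void responder_done(void)
{
	/* the last responder wakes the waiter */
	if (!atomic_dec_return(&pending))
		wake_up(&sync_waitq);
}

static long wait_for_all(unsigned int nr_queues)
{
	atomic_set(&pending, nr_queues);
	/* ... kick off nr_queues notifications here ... */
	return wait_event_timeout(sync_waitq,
				  atomic_read(&pending) == 0, HZ);
}
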
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index f279fdd..fa97432 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1199,6 +1199,9 @@ static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
 
 static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
 {
+	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
 	/* This looks a bit arbitrary, but the idea is that if we run
 	 * out of possible simultaneous scans and the userspace is
 	 * trying to run a scan type that is already running, we
@@ -1225,12 +1228,30 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
 			return -EBUSY;
 		return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
 	case IWL_MVM_SCAN_NETDETECT:
-		/* No need to stop anything for net-detect since the
-		 * firmware is restarted anyway.  This way, any sched
-		 * scans that were running will be restarted when we
-		 * resume.
-		*/
-		return 0;
+		/* For non-unified images, there's no need to stop
+		 * anything for net-detect since the firmware is
+		 * restarted anyway.  This way, any sched scans that
+		 * were running will be restarted when we resume.
+		 */
+		if (!unified_image)
+			return 0;
+
+		/* If this is a unified image and we ran out of scans,
+		 * we need to stop something.  Prefer stopping regular
+		 * scans, because the results are useless at this
+		 * point, and we should be able to keep running
+		 * another scheduled scan while suspended.
+		 */
+		if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
+			return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR,
+						 true);
+		if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
+			return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED,
+						 true);
+
+		/* fall through, something is wrong if no scan was
+		 * running but we ran out of scans.
+		 */
 	default:
 		WARN_ON(1);
 		break;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 001be40..2f8134b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -541,48 +541,64 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
 
 #ifdef CONFIG_ACPI
-#define SPL_METHOD		"SPLC"
-#define SPL_DOMAINTYPE_MODULE	BIT(0)
-#define SPL_DOMAINTYPE_WIFI	BIT(1)
-#define SPL_DOMAINTYPE_WIGIG	BIT(2)
-#define SPL_DOMAINTYPE_RFEM	BIT(3)
+#define ACPI_SPLC_METHOD	"SPLC"
+#define ACPI_SPLC_DOMAIN_WIFI	(0x07)
 
-static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
+static u64 splc_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splc)
 {
-	union acpi_object *limits, *domain_type, *power_limit;
+	union acpi_object *data_pkg, *dflt_pwr_limit;
+	int i;
 
-	if (splx->type != ACPI_TYPE_PACKAGE ||
-	    splx->package.count != 2 ||
-	    splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
-	    splx->package.elements[0].integer.value != 0) {
-		IWL_ERR(trans, "Unsupported splx structure\n");
+	/* We need at least two elements, one for the revision and one
+	 * for the data itself.  Also check that the revision is
+	 * supported (currently only revision 0).
+	 */
+	if (splc->type != ACPI_TYPE_PACKAGE ||
+	    splc->package.count < 2 ||
+	    splc->package.elements[0].type != ACPI_TYPE_INTEGER ||
+	    splc->package.elements[0].integer.value != 0) {
+		IWL_DEBUG_INFO(trans,
+			       "Unsupported structure returned by the SPLC method.  Ignoring.\n");
 		return 0;
 	}
 
-	limits = &splx->package.elements[1];
-	if (limits->type != ACPI_TYPE_PACKAGE ||
-	    limits->package.count < 2 ||
-	    limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
-	    limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
-		IWL_ERR(trans, "Invalid limits element\n");
+	/* loop through all the packages to find the one for WiFi */
+	for (i = 1; i < splc->package.count; i++) {
+		union acpi_object *domain;
+
+		data_pkg = &splc->package.elements[i];
+
+		/* Skip anything that is not a package with the right
+		 * amount of elements (i.e. at least 2 integers).
+		 */
+		if (data_pkg->type != ACPI_TYPE_PACKAGE ||
+		    data_pkg->package.count < 2 ||
+		    data_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
+		    data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
+			continue;
+
+		domain = &data_pkg->package.elements[0];
+		if (domain->integer.value == ACPI_SPLC_DOMAIN_WIFI)
+			break;
+
+		data_pkg = NULL;
+	}
+
+	if (!data_pkg) {
+		IWL_DEBUG_INFO(trans,
+			       "No element for the WiFi domain returned by the SPLC method.\n");
 		return 0;
 	}
 
-	domain_type = &limits->package.elements[0];
-	power_limit = &limits->package.elements[1];
-	if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
-		IWL_DEBUG_INFO(trans, "WiFi power is not limited\n");
-		return 0;
-	}
-
-	return power_limit->integer.value;
+	dflt_pwr_limit = &data_pkg->package.elements[1];
+	return dflt_pwr_limit->integer.value;
 }
 
 static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
 {
 	acpi_handle pxsx_handle;
 	acpi_handle handle;
-	struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL};
+	struct acpi_buffer splc = {ACPI_ALLOCATE_BUFFER, NULL};
 	acpi_status status;
 
 	pxsx_handle = ACPI_HANDLE(&pdev->dev);
@@ -593,23 +609,24 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
 	}
 
 	/* Get the method's handle */
-	status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
+	status = acpi_get_handle(pxsx_handle, (acpi_string)ACPI_SPLC_METHOD,
+				 &handle);
 	if (ACPI_FAILURE(status)) {
-		IWL_DEBUG_INFO(trans, "SPL method not found\n");
+		IWL_DEBUG_INFO(trans, "SPLC method not found\n");
 		return;
 	}
 
 	/* Call SPLC with no arguments */
-	status = acpi_evaluate_object(handle, NULL, NULL, &splx);
+	status = acpi_evaluate_object(handle, NULL, NULL, &splc);
 	if (ACPI_FAILURE(status)) {
 		IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status);
 		return;
 	}
 
-	trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
+	trans->dflt_pwr_limit = splc_get_pwr_limit(trans, splc.pointer);
 	IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n",
 		       trans->dflt_pwr_limit);
-	kfree(splx.pointer);
+	kfree(splc.pointer);
 }
 
 #else /* CONFIG_ACPI */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index e9a278b..5f840f1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -592,6 +592,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
 static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
 			      int slots_num, u32 txq_id)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int ret;
 
 	txq->need_update = false;
@@ -606,6 +607,13 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
 		return ret;
 
 	spin_lock_init(&txq->lock);
+
+	if (txq_id == trans_pcie->cmd_queue) {
+		static struct lock_class_key iwl_pcie_cmd_queue_lock_class;
+
+		lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class);
+	}
+
 	__skb_queue_head_init(&txq->overflow_q);
 
 	/*
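
The iwl_pcie_txq_init() hunk gives the command queue's lock a lock_class_key of its own, so lockdep treats it as a class distinct from the data-queue locks and stops reporting false recursive-locking splats when the two nest. The idiom, reduced:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct queue {
	spinlock_t lock;
};

static void queue_init(struct queue *q, bool is_cmd_queue)
{
	spin_lock_init(&q->lock);

	if (is_cmd_queue) {
		/* one static key per distinct lockdep class */
		static struct lock_class_key cmd_queue_lock_class;

		lockdep_set_class(&q->lock, &cmd_queue_lock_class);
	}
}
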
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e17879d..bf2744e 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -304,7 +304,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 		queue->rx_skbs[id] = skb;
 
 		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
-		BUG_ON((signed short)ref < 0);
+		WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
 		queue->grant_rx_ref[id] = ref;
 
 		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
@@ -428,7 +428,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
 	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
 	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
-	BUG_ON((signed short)ref < 0);
+	WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
 
 	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
 					gfn, GNTMAP_readonly);
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
index 0d5c29a..7310a26 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.c
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.c
@@ -112,17 +112,17 @@ MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
 
 module_param_named(xeon_b2b_usd_bar4_addr64,
 		   xeon_b2b_usd_addr.bar4_addr64, ullong, 0644);
-MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
+MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64,
 		 "XEON B2B USD BAR 4 64-bit address");
 
 module_param_named(xeon_b2b_usd_bar4_addr32,
 		   xeon_b2b_usd_addr.bar4_addr32, ullong, 0644);
-MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
+MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32,
 		 "XEON B2B USD split-BAR 4 32-bit address");
 
 module_param_named(xeon_b2b_usd_bar5_addr32,
 		   xeon_b2b_usd_addr.bar5_addr32, ullong, 0644);
-MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
+MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32,
 		 "XEON B2B USD split-BAR 5 32-bit address");
 
 module_param_named(xeon_b2b_dsd_bar2_addr64,
@@ -132,17 +132,17 @@ MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
 
 module_param_named(xeon_b2b_dsd_bar4_addr64,
 		   xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644);
-MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
+MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64,
 		 "XEON B2B DSD BAR 4 64-bit address");
 
 module_param_named(xeon_b2b_dsd_bar4_addr32,
 		   xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644);
-MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
+MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32,
 		 "XEON B2B DSD split-BAR 4 32-bit address");
 
 module_param_named(xeon_b2b_dsd_bar5_addr32,
 		   xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644);
-MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
+MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32,
 		 "XEON B2B DSD split-BAR 5 32-bit address");
 
 #ifndef ioread64
@@ -1755,6 +1755,8 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
 					    XEON_B2B_MIN_SIZE);
 		if (!ndev->peer_mmio)
 			return -EIO;
+
+		ndev->peer_addr = pci_resource_start(pdev, b2b_bar);
 	}
 
 	return 0;
@@ -2019,6 +2021,7 @@ static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
 		goto err_mmio;
 	}
 	ndev->peer_mmio = ndev->self_mmio;
+	ndev->peer_addr = pci_resource_start(pdev, 0);
 
 	return 0;
 
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 8601c10..4eb8adb 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -257,7 +257,7 @@ enum {
 #define NTB_QP_DEF_NUM_ENTRIES	100
 #define NTB_LINK_DOWN_TIMEOUT	10
 #define DMA_RETRIES		20
-#define DMA_OUT_RESOURCE_TO	50
+#define DMA_OUT_RESOURCE_TO	msecs_to_jiffies(50)
 
 static void ntb_transport_rxc_db(unsigned long data);
 static const struct ntb_ctx_ops ntb_transport_ops;
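
The timeout APIs used with DMA_OUT_RESOURCE_TO take jiffies, not milliseconds; a bare 50 meant 50 ticks, anywhere from 50 ms at HZ=1000 to half a second at HZ=100. Converting once at the definition keeps every user HZ-independent:

#include <linux/jiffies.h>
#include <linux/completion.h>

#define DMA_OUT_RESOURCE_TO	msecs_to_jiffies(50)	/* always 50 ms */

static bool dma_wait_done(struct completion *done)
{
	/* wait_for_completion_timeout() returns 0 on timeout */
	return wait_for_completion_timeout(done, DMA_OUT_RESOURCE_TO) != 0;
}
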
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 6a50f20..e75d4fd 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -72,7 +72,7 @@
 #define MAX_THREADS		32
 #define MAX_TEST_SIZE		SZ_1M
 #define MAX_SRCS		32
-#define DMA_OUT_RESOURCE_TO	50
+#define DMA_OUT_RESOURCE_TO	msecs_to_jiffies(50)
 #define DMA_RETRIES		20
 #define SZ_4G			(1ULL << 32)
 #define MAX_SEG_ORDER		20 /* no larger than 1M for kmalloc buffer */
@@ -589,7 +589,7 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
 		return -ENOMEM;
 
 	if (mutex_is_locked(&perf->run_mutex)) {
-		out_off = snprintf(buf, 64, "running\n");
+		out_off = scnprintf(buf, 64, "running\n");
 		goto read_from_buf;
 	}
 
@@ -600,14 +600,14 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
 			break;
 
 		if (pctx->status) {
-			out_off += snprintf(buf + out_off, 1024 - out_off,
+			out_off += scnprintf(buf + out_off, 1024 - out_off,
 					    "%d: error %d\n", i,
 					    pctx->status);
 			continue;
 		}
 
 		rate = div64_u64(pctx->copied, pctx->diff_us);
-		out_off += snprintf(buf + out_off, 1024 - out_off,
+		out_off += scnprintf(buf + out_off, 1024 - out_off,
 			"%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n",
 			i, pctx->copied, pctx->diff_us, rate);
 	}
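
The snprintf()-to-scnprintf() swap matters when the return value is used as a running offset: snprintf() returns the length the output would have had, so a truncated write can push out_off past the buffer, while scnprintf() returns the bytes actually stored. A userspace equivalent of the safe accumulation:

#include <stdio.h>

/* append msg at off, never advancing past the buffer (scnprintf-style) */
static size_t append(char *buf, size_t size, size_t off, const char *msg)
{
	int ret;

	if (off >= size)
		return off;
	ret = snprintf(buf + off, size - off, "%s", msg);
	if (ret < 0)
		return off;
	if ((size_t)ret >= size - off)	/* truncated: count stored bytes only */
		return size - 1;
	return off + (size_t)ret;
}
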
diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c
index 7d31179..4358611 100644
--- a/drivers/ntb/test/ntb_pingpong.c
+++ b/drivers/ntb/test/ntb_pingpong.c
@@ -88,7 +88,7 @@ MODULE_PARM_DESC(delay_ms, "Milliseconds to delay the response to peer");
 
 static unsigned long db_init = 0x7;
 module_param(db_init, ulong, 0644);
-MODULE_PARM_DESC(delay_ms, "Initial doorbell bits to ring on the peer");
+MODULE_PARM_DESC(db_init, "Initial doorbell bits to ring on the peer");
 
 struct pp_ctx {
 	struct ntb_dev			*ntb;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 0248d0e..5e52034 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1242,20 +1242,16 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 
 	result = nvme_enable_ctrl(&dev->ctrl, cap);
 	if (result)
-		goto free_nvmeq;
+		return result;
 
 	nvmeq->cq_vector = 0;
 	result = queue_request_irq(nvmeq);
 	if (result) {
 		nvmeq->cq_vector = -1;
-		goto free_nvmeq;
+		return result;
 	}
 
 	return result;
-
- free_nvmeq:
-	nvme_free_queues(dev, 0);
-	return result;
 }
 
 static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
@@ -1317,10 +1313,8 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
 	max = min(dev->max_qid, dev->queue_count - 1);
 	for (i = dev->online_queues; i <= max; i++) {
 		ret = nvme_create_queue(dev->queues[i], i);
-		if (ret) {
-			nvme_free_queues(dev, i);
+		if (ret)
 			break;
-		}
 	}
 
 	/*
@@ -1460,13 +1454,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	result = queue_request_irq(adminq);
 	if (result) {
 		adminq->cq_vector = -1;
-		goto free_queues;
+		return result;
 	}
 	return nvme_create_io_queues(dev);
-
- free_queues:
-	nvme_free_queues(dev, 1);
-	return result;
 }
 
 static void nvme_del_queue_end(struct request *req, int error)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 5a83881..3d25add 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -83,6 +83,7 @@ enum nvme_rdma_queue_flags {
 	NVME_RDMA_Q_CONNECTED = (1 << 0),
 	NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1),
 	NVME_RDMA_Q_DELETING = (1 << 2),
+	NVME_RDMA_Q_LIVE = (1 << 3),
 };
 
 struct nvme_rdma_queue {
@@ -624,10 +625,18 @@ static int nvme_rdma_connect_io_queues(struct nvme_rdma_ctrl *ctrl)
 
 	for (i = 1; i < ctrl->queue_count; i++) {
 		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
-		if (ret)
-			break;
+		if (ret) {
+			dev_info(ctrl->ctrl.device,
+				"failed to connect i/o queue: %d\n", ret);
+			goto out_free_queues;
+		}
+		set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);
 	}
 
+	return 0;
+
+out_free_queues:
+	nvme_rdma_free_io_queues(ctrl);
 	return ret;
 }
 
@@ -712,6 +721,8 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
 	if (ret)
 		goto stop_admin_q;
 
+	set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
+
 	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
 	if (ret)
 		goto stop_admin_q;
@@ -761,8 +772,10 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 
 	nvme_stop_keep_alive(&ctrl->ctrl);
 
-	for (i = 0; i < ctrl->queue_count; i++)
+	for (i = 0; i < ctrl->queue_count; i++) {
 		clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags);
+		clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);
+	}
 
 	if (ctrl->queue_count > 1)
 		nvme_stop_queues(&ctrl->ctrl);
@@ -1378,6 +1391,24 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
 	return BLK_EH_HANDLED;
 }
 
+/*
+ * We cannot accept any other command until the Connect command has completed.
+ */
+static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
+		struct request *rq)
+{
+	if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
+		struct nvme_command *cmd = (struct nvme_command *)rq->cmd;
+
+		if (rq->cmd_type != REQ_TYPE_DRV_PRIV ||
+		    cmd->common.opcode != nvme_fabrics_command ||
+		    cmd->fabrics.fctype != nvme_fabrics_type_connect)
+			return false;
+	}
+
+	return true;
+}
+
 static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 		const struct blk_mq_queue_data *bd)
 {
@@ -1394,6 +1425,9 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	WARN_ON_ONCE(rq->tag < 0);
 
+	if (!nvme_rdma_queue_is_ready(queue, rq))
+		return BLK_MQ_RQ_QUEUE_BUSY;
+
 	dev = queue->device->dev;
 	ib_dma_sync_single_for_cpu(dev, sqe->dma,
 			sizeof(struct nvme_command), DMA_TO_DEVICE);
@@ -1544,6 +1578,8 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
 	if (error)
 		goto out_cleanup_queue;
 
+	set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
+
 	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
 	if (error) {
 		dev_err(ctrl->ctrl.device,
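
The new NVME_RDMA_Q_LIVE bit closes the window between the RDMA transport connecting and the fabrics Connect command completing: until the bit is set, queue_rq() lets only the Connect command itself through and pushes everything else back with BLK_MQ_RQ_QUEUE_BUSY so the block layer retries later. The gate, reduced to a sketch:

#include <linux/bitops.h>
#include <linux/types.h>

#define Q_LIVE_BIT	3	/* mirrors NVME_RDMA_Q_LIVE */

/* admit only the fabrics Connect command on a not-yet-live queue */
static bool queue_is_ready(const unsigned long *flags, bool is_connect_cmd)
{
	if (test_bit(Q_LIVE_BIT, flags))
		return true;
	return is_connect_cmd;	/* everything else is requeued as BUSY */
}
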
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b4cacb6..a21437a 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -838,9 +838,13 @@ static void nvmet_fatal_error_handler(struct work_struct *work)
 
 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
 {
-	ctrl->csts |= NVME_CSTS_CFS;
-	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
-	schedule_work(&ctrl->fatal_err_work);
+	mutex_lock(&ctrl->lock);
+	if (!(ctrl->csts & NVME_CSTS_CFS)) {
+		ctrl->csts |= NVME_CSTS_CFS;
+		INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
+		schedule_work(&ctrl->fatal_err_work);
+	}
+	mutex_unlock(&ctrl->lock);
 }
 EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
 
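Guarding the CFS bit with ctrl->lock makes nvmet_ctrl_fatal_error() idempotent: two paths reporting a fatal error concurrently could otherwise INIT_WORK and schedule the same work item twice. The shape of the once-only transition (assuming, unlike the in-place INIT_WORK above, that the work item was initialized at controller setup):

#include <linux/mutex.h>
#include <linux/workqueue.h>

struct ctrl {
	struct mutex lock;
	bool failed;
	struct work_struct err_work;	/* INIT_WORK() done at setup */
};

static void ctrl_fatal_error(struct ctrl *c)
{
	mutex_lock(&c->lock);
	if (!c->failed) {	/* first caller wins; later calls are no-ops */
		c->failed = true;
		schedule_work(&c->err_work);
	}
	mutex_unlock(&c->lock);
}
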
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index f8d2399..005ef5d 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -951,6 +951,7 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
 
 static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
 {
+	ib_drain_qp(queue->cm_id->qp);
 	rdma_destroy_qp(queue->cm_id);
 	ib_free_cq(queue->cq);
 }
@@ -1066,6 +1067,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
 	spin_lock_init(&queue->rsp_wr_wait_lock);
 	INIT_LIST_HEAD(&queue->free_rsps);
 	spin_lock_init(&queue->rsps_lock);
+	INIT_LIST_HEAD(&queue->queue_list);
 
 	queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
 	if (queue->idx < 0) {
@@ -1244,7 +1246,6 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
 
 	if (disconnect) {
 		rdma_disconnect(queue->cm_id);
-		ib_drain_qp(queue->cm_id->qp);
 		schedule_work(&queue->release_work);
 	}
 }
@@ -1269,7 +1270,12 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
 {
 	WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);
 
-	pr_err("failed to connect queue\n");
+	mutex_lock(&nvmet_rdma_queue_mutex);
+	if (!list_empty(&queue->queue_list))
+		list_del_init(&queue->queue_list);
+	mutex_unlock(&nvmet_rdma_queue_mutex);
+
+	pr_err("failed to connect queue %d\n", queue->idx);
 	schedule_work(&queue->release_work);
 }
 
@@ -1352,7 +1358,13 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
 	case RDMA_CM_EVENT_ADDR_CHANGE:
 	case RDMA_CM_EVENT_DISCONNECTED:
 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
-		nvmet_rdma_queue_disconnect(queue);
+		/*
+		 * We might end up here when we already freed the qp
+		 * which means queue release sequence is in progress,
+		 * so don't get in the way...
+		 */
+		if (queue)
+			nvmet_rdma_queue_disconnect(queue);
 		break;
 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
 		ret = nvmet_rdma_device_removal(cm_id, queue);
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c
index 55f453d..c7f3408 100644
--- a/drivers/pci/pci-mid.c
+++ b/drivers/pci/pci-mid.c
@@ -29,6 +29,11 @@ static int mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
 	return intel_mid_pci_set_power_state(pdev, state);
 }
 
+static pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
+{
+	return intel_mid_pci_get_power_state(pdev);
+}
+
 static pci_power_t mid_pci_choose_state(struct pci_dev *pdev)
 {
 	return PCI_D3hot;
@@ -52,6 +57,7 @@ static bool mid_pci_need_resume(struct pci_dev *dev)
 static struct pci_platform_pm_ops mid_pci_platform_pm = {
 	.is_manageable	= mid_pci_power_manageable,
 	.set_state	= mid_pci_set_power_state,
+	.get_state	= mid_pci_get_power_state,
 	.choose_state	= mid_pci_choose_state,
 	.sleep_wake	= mid_pci_sleep_wake,
 	.run_wake	= mid_pci_run_wake,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 043d2a2..2575878 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -35,7 +35,7 @@
 #include <linux/qcom_iommu.h>
 #include <linux/time.h>
 #include <linux/hashtable.h>
-#include <linux/hash.h>
+#include <linux/jhash.h>
 #include <soc/qcom/subsystem_restart.h>
 #include <soc/qcom/smem.h>
 #include <soc/qcom/scm.h>
@@ -3250,7 +3250,7 @@ void ipa3_active_clients_log_mod(struct ipa_active_client_logging_info *id,
 	hfound = NULL;
 	memset(str_to_hash, 0, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
 	strlcpy(str_to_hash, id->id_string, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
-	hkey = arch_fast_hash(str_to_hash, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN,
+	hkey = jhash(str_to_hash, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN,
 			0);
 	hash_for_each_possible(ipa3_ctx->ipa3_active_clients_logging.htable,
 			hentry, list, hkey) {
@@ -4616,6 +4616,9 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 		IPADBG("Initialization of ipa interrupts skipped\n");
 	}
 
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5)
+		ipa3_enable_dcd();
+
 	INIT_LIST_HEAD(&ipa3_ctx->ipa_ready_cb_list);
 
 	init_completion(&ipa3_ctx->init_completion_obj);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 7ac26e6..574e81c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -767,6 +767,30 @@ static void ipa3_transport_irq_cmd_ack(void *user1, int user2)
 }
 
 /**
+ * ipa3_transport_irq_cmd_ack_free - callback invoked by the SPS/GSI driver
+ * when an immediate command completes. It also frees the completion
+ * object once both users are done with it.
+ * @tag_comp: pointer to the completion object
+ * @ignored: parameter not used
+ *
+ * Complete the immediate command's completion object; this releases the
+ * thread waiting on it (ipa3_send_cmd_timeout()).
+ */
+static void ipa3_transport_irq_cmd_ack_free(void *tag_comp, int ignored)
+{
+	struct ipa3_tag_completion *comp = tag_comp;
+
+	if (!comp) {
+		IPAERR("comp is NULL\n");
+		return;
+	}
+
+	complete(&comp->comp);
+	if (atomic_dec_return(&comp->cnt) == 0)
+		kfree(comp);
+}
+
+/**
  * ipa3_send_cmd - send immediate commands
  * @num_desc:	number of descriptors within the desc struct
  * @descr:	descriptor structure
@@ -778,7 +802,58 @@ static void ipa3_transport_irq_cmd_ack(void *user1, int user2)
  */
 int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr)
 {
-	return ipa3_send_cmd_timeout(num_desc, descr, 0);
+	struct ipa3_desc *desc;
+	int i, result = 0;
+	struct ipa3_sys_context *sys;
+	int ep_idx;
+
+	for (i = 0; i < num_desc; i++)
+		IPADBG("sending imm cmd %d\n", descr[i].opcode);
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
+	if (ep_idx == -1) {
+		IPAERR("Client %u is not mapped\n",
+			IPA_CLIENT_APPS_CMD_PROD);
+		return -EFAULT;
+	}
+
+	sys = ipa3_ctx->ep[ep_idx].sys;
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	if (num_desc == 1) {
+		init_completion(&descr->xfer_done);
+
+		if (descr->callback || descr->user1)
+			WARN_ON(1);
+
+		descr->callback = ipa3_transport_irq_cmd_ack;
+		descr->user1 = descr;
+		if (ipa3_send_one(sys, descr, true)) {
+			IPAERR("fail to send immediate command\n");
+			result = -EFAULT;
+			goto bail;
+		}
+		wait_for_completion(&descr->xfer_done);
+	} else {
+		desc = &descr[num_desc - 1];
+		init_completion(&desc->xfer_done);
+
+		if (desc->callback || desc->user1)
+			WARN_ON(1);
+
+		desc->callback = ipa3_transport_irq_cmd_ack;
+		desc->user1 = desc;
+		if (ipa3_send(sys, num_desc, descr, true)) {
+			IPAERR("fail to send multiple immediate command set\n");
+			result = -EFAULT;
+			goto bail;
+		}
+		wait_for_completion(&desc->xfer_done);
+	}
+
+bail:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
 }
 
 /**
@@ -800,6 +875,7 @@ int ipa3_send_cmd_timeout(u16 num_desc, struct ipa3_desc *descr, u32 timeout)
 	struct ipa3_sys_context *sys;
 	int ep_idx;
 	int completed;
+	struct ipa3_tag_completion *comp;
 
 	for (i = 0; i < num_desc; i++)
 		IPADBG("sending imm cmd %d\n", descr[i].opcode);
@@ -810,55 +886,56 @@ int ipa3_send_cmd_timeout(u16 num_desc, struct ipa3_desc *descr, u32 timeout)
 			IPA_CLIENT_APPS_CMD_PROD);
 		return -EFAULT;
 	}
+
+	comp = kzalloc(sizeof(*comp), GFP_ATOMIC);
+	if (!comp) {
+		IPAERR("no mem\n");
+		return -ENOMEM;
+	}
+	init_completion(&comp->comp);
+
+	/* the completion object is released both here and in the ack callback */
+	atomic_set(&comp->cnt, 2);
+
 	sys = ipa3_ctx->ep[ep_idx].sys;
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 
 	if (num_desc == 1) {
-		init_completion(&descr->xfer_done);
-
 		if (descr->callback || descr->user1)
 			WARN_ON(1);
 
-		descr->callback = ipa3_transport_irq_cmd_ack;
-		descr->user1 = descr;
+		descr->callback = ipa3_transport_irq_cmd_ack_free;
+		descr->user1 = comp;
 		if (ipa3_send_one(sys, descr, true)) {
 			IPAERR("fail to send immediate command\n");
+			kfree(comp);
 			result = -EFAULT;
 			goto bail;
 		}
-		if (timeout) {
-			completed = wait_for_completion_timeout(
-				&descr->xfer_done, msecs_to_jiffies(timeout));
-			if (!completed)
-				IPADBG("timeout waiting for imm-cmd ACK\n");
-		} else {
-			wait_for_completion(&descr->xfer_done);
-		}
 	} else {
 		desc = &descr[num_desc - 1];
-		init_completion(&desc->xfer_done);
 
 		if (desc->callback || desc->user1)
 			WARN_ON(1);
 
-		desc->callback = ipa3_transport_irq_cmd_ack;
-		desc->user1 = desc;
+		desc->callback = ipa3_transport_irq_cmd_ack_free;
+		desc->user1 = comp;
 		if (ipa3_send(sys, num_desc, descr, true)) {
 			IPAERR("fail to send multiple immediate command set\n");
+			kfree(comp);
 			result = -EFAULT;
 			goto bail;
 		}
-		if (timeout) {
-			completed = wait_for_completion_timeout(
-				&desc->xfer_done, msecs_to_jiffies(timeout));
-			if (!completed)
-				IPADBG("timeout waiting for imm-cmd ACK\n");
-		} else {
-			wait_for_completion(&desc->xfer_done);
-		}
-
 	}
 
+	completed = wait_for_completion_timeout(
+		&comp->comp, msecs_to_jiffies(timeout));
+	if (!completed)
+		IPADBG("timeout waiting for imm-cmd ACK\n");
+
+	if (atomic_dec_return(&comp->cnt) == 0)
+		kfree(comp);
+
 bail:
 	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 	return result;
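
The timeout rework above moves the completion off the stack and gives it a reference count of two, one for the waiter and one for the ack callback, so whichever side finishes last frees it; an on-stack completion would be a use-after-free if the timeout fired before the ACK arrived. The core of the pattern, stripped of the IPA plumbing:

#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/errno.h>

struct tag_completion {
	struct completion comp;
	atomic_t cnt;
};

static void cmd_ack(void *data)		/* runs when the hardware ACKs */
{
	struct tag_completion *c = data;

	complete(&c->comp);
	if (atomic_dec_return(&c->cnt) == 0)
		kfree(c);		/* waiter already gave up and left */
}

static int send_and_wait(unsigned int timeout_ms)
{
	struct tag_completion *c = kzalloc(sizeof(*c), GFP_ATOMIC);

	if (!c)
		return -ENOMEM;
	init_completion(&c->comp);
	atomic_set(&c->cnt, 2);		/* one ref here, one for cmd_ack() */

	/* ... issue the command with cmd_ack()/c as the callback ... */

	if (!wait_for_completion_timeout(&c->comp,
					 msecs_to_jiffies(timeout_ms)))
		pr_debug("timeout waiting for ack\n");

	if (atomic_dec_return(&c->cnt) == 0)
		kfree(c);
	return 0;
}
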
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index db1f52c..d0c5c9d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -2031,4 +2031,5 @@ int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats);
 struct dentry *ipa_debugfs_get_root(void);
 bool ipa3_is_msm_device(void);
 struct device *ipa3_get_pdev(void);
+void ipa3_enable_dcd(void);
 #endif /* _IPA3_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 15449b4..2564b90 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -98,35 +98,60 @@
 	{-1, -1, false, IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_DDR}
 
 /* Resource Group index*/
-#define IPA_GROUP_UL		(0)
-#define IPA_GROUP_DL		(1)
-#define IPA_GROUP_DPL		IPA_GROUP_DL
-#define IPA_GROUP_DIAG		(2)
-#define IPA_GROUP_DMA		(3)
-#define IPA_GROUP_IMM_CMD	IPA_GROUP_UL
-#define IPA_GROUP_Q6ZIP		(4)
-#define IPA_GROUP_Q6ZIP_GENERAL	IPA_GROUP_Q6ZIP
-#define IPA_GROUP_UC_RX_Q	(5)
-#define IPA_GROUP_Q6ZIP_ENGINE	IPA_GROUP_UC_RX_Q
-#define IPA_GROUP_MAX		(6)
+#define IPA_v3_0_GROUP_UL		(0)
+#define IPA_v3_0_GROUP_DL		(1)
+#define IPA_v3_0_GROUP_DPL		IPA_v3_0_GROUP_DL
+#define IPA_v3_0_GROUP_DIAG		(2)
+#define IPA_v3_0_GROUP_DMA		(3)
+#define IPA_v3_0_GROUP_IMM_CMD		IPA_v3_0_GROUP_UL
+#define IPA_v3_0_GROUP_Q6ZIP		(4)
+#define IPA_v3_0_GROUP_Q6ZIP_GENERAL	IPA_v3_0_GROUP_Q6ZIP
+#define IPA_v3_0_GROUP_UC_RX_Q		(5)
+#define IPA_v3_0_GROUP_Q6ZIP_ENGINE	IPA_v3_0_GROUP_UC_RX_Q
+#define IPA_v3_0_GROUP_MAX		(6)
+
+#define IPA_v3_5_1_GROUP_LWA_DL	(0)
+#define IPA_v3_5_1_GROUP_UL_DL		(1)
+#define IPA_v3_5_1_GROUP_DMA		(2)
+#define IPA_v3_5_1_GROUP_UC_RX_Q	(3)
+#define IPA_v3_5_1_SRC_GROUP_MAX	(4)
+#define IPA_v3_5_1_DST_GROUP_MAX	(3)
+
+#define IPA_GROUP_MAX IPA_v3_0_GROUP_MAX
 
 enum ipa_rsrc_grp_type_src {
-	IPA_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS,
-	IPA_RSRC_GRP_TYPE_SRC_HDR_SECTORS,
-	IPA_RSRC_GRP_TYPE_SRC_HDRI1_BUFFER,
-	IPA_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS,
-	IPA_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF,
-	IPA_RSRC_GRP_TYPE_SRC_HDRI2_BUFFERS,
-	IPA_RSRC_GRP_TYPE_SRC_HPS_DMARS,
-	IPA_RSRC_GRP_TYPE_SRC_ACK_ENTRIES,
-	IPA_RSRC_GRP_TYPE_SRC_MAX,
+	IPA_v3_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS,
+	IPA_v3_0_RSRC_GRP_TYPE_SRC_HDR_SECTORS,
+	IPA_v3_0_RSRC_GRP_TYPE_SRC_HDRI1_BUFFER,
+	IPA_v3_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS,
+	IPA_v3_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF,
+	IPA_v3_0_RSRC_GRP_TYPE_SRC_HDRI2_BUFFERS,
+	IPA_v3_0_RSRC_GRP_TYPE_SRC_HPS_DMARS,
+	IPA_v3_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES,
+	IPA_v3_0_RSRC_GRP_TYPE_SRC_MAX,
+
+	IPA_v3_5_1_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS = 0,
+	IPA_v3_5_1_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS,
+	IPA_v3_5_1_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF,
+	IPA_v3_5_1_RSRC_GRP_TYPE_SRC_HPS_DMARS,
+	IPA_v3_5_1_RSRC_GRP_TYPE_SRC_ACK_ENTRIES,
+	IPA_v3_5_1_RSRC_GRP_TYPE_SRC_MAX
 };
+
+#define IPA_RSRC_GRP_TYPE_SRC_MAX IPA_v3_0_RSRC_GRP_TYPE_SRC_MAX
+
 enum ipa_rsrc_grp_type_dst {
-	IPA_RSRC_GRP_TYPE_DST_DATA_SECTORS,
-	IPA_RSRC_GRP_TYPE_DST_DATA_SECTOR_LISTS,
-	IPA_RSRC_GRP_TYPE_DST_DPS_DMARS,
-	IPA_RSRC_GRP_TYPE_DST_MAX,
+	IPA_v3_0_RSRC_GRP_TYPE_DST_DATA_SECTORS,
+	IPA_v3_0_RSRC_GRP_TYPE_DST_DATA_SECTOR_LISTS,
+	IPA_v3_0_RSRC_GRP_TYPE_DST_DPS_DMARS,
+	IPA_v3_0_RSRC_GRP_TYPE_DST_MAX,
+
+	IPA_v3_5_1_RSRC_GRP_TYPE_DST_DATA_SECTORS = 0,
+	IPA_v3_5_1_RSRC_GRP_TYPE_DST_DPS_DMARS,
+	IPA_v3_5_1_RSRC_GRP_TYPE_DST_MAX,
 };
+#define IPA_RSRC_GRP_TYPE_DST_MAX IPA_v3_0_RSRC_GRP_TYPE_DST_MAX
+
 enum ipa_rsrc_grp_type_rx {
 	IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ,
 	IPA_RSRC_GRP_TYPE_RX_MAX
@@ -136,48 +161,82 @@ struct rsrc_min_max {
 	u32 max;
 };
 
-static const struct rsrc_min_max ipa3_rsrc_src_grp_config
-			[IPA_RSRC_GRP_TYPE_SRC_MAX][IPA_GROUP_MAX] = {
-		/*UL	DL	DIAG	DMA	Not Used	uC Rx*/
-	[IPA_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
-		{3, 255}, {3, 255}, {1, 255}, {1, 255}, {1, 255}, {2, 255} },
-	[IPA_RSRC_GRP_TYPE_SRC_HDR_SECTORS] = {
-		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
-	[IPA_RSRC_GRP_TYPE_SRC_HDRI1_BUFFER] = {
-		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
-	[IPA_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = {
-		{14, 14}, {16, 16}, {5, 5}, {5, 5},  {0, 0}, {8, 8} },
-	[IPA_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
-		{19, 19}, {26, 26}, {3, 3}, {7, 7}, {0, 0}, {8, 8} },
-	[IPA_RSRC_GRP_TYPE_SRC_HDRI2_BUFFERS] = {
-		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
-	[IPA_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
-		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
-	[IPA_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
-		{14, 14}, {16, 16}, {5, 5}, {5, 5}, {0, 0}, {8, 8} },
-};
-static const struct rsrc_min_max ipa3_rsrc_dst_grp_config
-			[IPA_RSRC_GRP_TYPE_DST_MAX][IPA_GROUP_MAX] = {
-		/*UL	DL/DPL	DIAG	DMA  Q6zip_gen Q6zip_eng*/
-	[IPA_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
-		{2, 2}, {3, 3}, {0, 0}, {2, 2}, {3, 3}, {3, 3} },
-	[IPA_RSRC_GRP_TYPE_DST_DATA_SECTOR_LISTS] = {
-		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
-	[IPA_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
-		{1, 1}, {1, 1}, {1, 1}, {1, 1}, {1, 1}, {0, 0} },
-};
-static const struct rsrc_min_max ipa3_rsrc_rx_grp_config
-			[IPA_RSRC_GRP_TYPE_RX_MAX][IPA_GROUP_MAX] = {
-		/*UL	DL	DIAG	DMA	Not Used	uC Rx*/
-	[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
-		{16, 16}, {24, 24}, {8, 8}, {8, 8}, {0, 0}, {8, 8} },
-};
-
 enum ipa_ver {
 	IPA_3_0,
+	IPA_3_5,
+	IPA_3_5_1,
 	IPA_VER_MAX,
 };
 
+static const struct rsrc_min_max ipa3_rsrc_src_grp_config
+	[IPA_VER_MAX][IPA_RSRC_GRP_TYPE_SRC_MAX][IPA_GROUP_MAX] = {
+	[IPA_3_0] = {
+		/*UL	DL	DIAG	DMA	Not Used	uC Rx*/
+		[IPA_v3_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
+		{3, 255}, {3, 255}, {1, 255}, {1, 255}, {1, 255}, {2, 255} },
+		[IPA_v3_0_RSRC_GRP_TYPE_SRC_HDR_SECTORS] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
+		[IPA_v3_0_RSRC_GRP_TYPE_SRC_HDRI1_BUFFER] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
+		[IPA_v3_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = {
+		{14, 14}, {16, 16}, {5, 5}, {5, 5},  {0, 0}, {8, 8} },
+		[IPA_v3_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
+		{19, 19}, {26, 26}, {3, 3}, {7, 7}, {0, 0}, {8, 8} },
+		[IPA_v3_0_RSRC_GRP_TYPE_SRC_HDRI2_BUFFERS] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
+		[IPA_v3_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
+		[IPA_v3_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
+		{14, 14}, {16, 16}, {5, 5}, {5, 5}, {0, 0}, {8, 8} },
+	},
+	[IPA_3_5_1] = {
+		/* LWA_DL  UL_DL    not used  UC_RX_Q; others are invalid */
+		[IPA_v3_5_1_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
+		{1, 255}, {1, 255}, {0, 0}, {1, 255}, {0, 0}, {0, 0} },
+		[IPA_v3_5_1_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = {
+		{10, 10}, {10, 10}, {0, 0}, {8, 8}, {0, 0}, {0, 0} },
+		[IPA_v3_5_1_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
+		{12, 12}, {14, 14}, {0, 0}, {8, 8}, {0, 0}, {0, 0} },
+		[IPA_v3_5_1_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255},  {0, 0}, {0, 0} },
+		[IPA_v3_5_1_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
+		{14, 14}, {20, 20}, {0, 0}, {14, 14}, {0, 0}, {0, 0} },
+	}
+};
+
+static const struct rsrc_min_max ipa3_rsrc_dst_grp_config
+	[IPA_VER_MAX][IPA_RSRC_GRP_TYPE_DST_MAX][IPA_GROUP_MAX] = {
+	[IPA_3_0] = {
+			/*UL	DL/DPL	DIAG	DMA  Q6zip_gen Q6zip_eng*/
+		[IPA_v3_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
+		{2, 2}, {3, 3}, {0, 0}, {2, 2}, {3, 3}, {3, 3} },
+		[IPA_v3_0_RSRC_GRP_TYPE_DST_DATA_SECTOR_LISTS] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
+		[IPA_v3_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
+		{1, 1}, {1, 1}, {1, 1}, {1, 1}, {1, 1}, {0, 0} },
+	},
+	[IPA_3_5_1] = {
+			/* LWA_DL UL/DL/DPL not used; others are invalid */
+		[IPA_v3_5_1_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
+		{4, 4}, {4, 4}, {3, 3}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v3_5_1_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
+		{2, 255}, {1, 255}, {1, 2}, {0, 0}, {0, 0}, {0, 0} },
+	}
+};
+static const struct rsrc_min_max ipa3_rsrc_rx_grp_config
+	[IPA_VER_MAX][IPA_RSRC_GRP_TYPE_RX_MAX][IPA_GROUP_MAX] = {
+		[IPA_3_0] = {
+		/*UL	DL	DIAG	DMA	Unused	uC Rx*/
+		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
+		{16, 16}, {24, 24}, {8, 8}, {8, 8}, {0, 0}, {8, 8} },
+		},
+		[IPA_3_5_1] = {
+		/* LWA_DL UL_DL	not used UC_RX_Q; others are invalid */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
+		{3, 3}, {7, 7}, {0, 0}, {2, 2}, {0, 0}, {0, 0} },
+		},
+};
+
 struct ipa_ep_configuration {
 	int pipe_num;
 	int group_num;
@@ -189,7 +248,8 @@ struct ipa_ep_configuration {
 static const struct ipa_ep_configuration ipa3_ep_mapping
 					[IPA_VER_MAX][IPA_CLIENT_MAX] = {
 	[IPA_3_0][IPA_CLIENT_HSIC1_PROD]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_0][IPA_CLIENT_WLAN1_PROD]          = {10, IPA_GROUP_UL, true,
+	[IPA_3_0][IPA_CLIENT_WLAN1_PROD]          = {
+			10, IPA_v3_0_GROUP_UL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR},
 	[IPA_3_0][IPA_CLIENT_HSIC2_PROD]          = IPA_CLIENT_NOT_USED,
@@ -199,187 +259,437 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 	[IPA_3_0][IPA_CLIENT_HSIC4_PROD]          = IPA_CLIENT_NOT_USED,
 	[IPA_3_0][IPA_CLIENT_USB4_PROD]           = IPA_CLIENT_NOT_USED,
 	[IPA_3_0][IPA_CLIENT_HSIC5_PROD]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_0][IPA_CLIENT_USB_PROD]            = {1, IPA_GROUP_UL, true,
+	[IPA_3_0][IPA_CLIENT_USB_PROD]            = {
+			1, IPA_v3_0_GROUP_UL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR},
-	[IPA_3_0][IPA_CLIENT_UC_USB_PROD]         = {2, IPA_GROUP_UL, true,
+	[IPA_3_0][IPA_CLIENT_UC_USB_PROD]         = {
+			2, IPA_v3_0_GROUP_UL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR},
 	[IPA_3_0][IPA_CLIENT_A5_WLAN_AMPDU_PROD]  = IPA_CLIENT_NOT_USED,
 	[IPA_3_0][IPA_CLIENT_A2_EMBEDDED_PROD]    = IPA_CLIENT_NOT_USED,
 	[IPA_3_0][IPA_CLIENT_A2_TETHERED_PROD]    = IPA_CLIENT_NOT_USED,
-	[IPA_3_0][IPA_CLIENT_APPS_LAN_WAN_PROD]   = {14, IPA_GROUP_UL, true,
+	[IPA_3_0][IPA_CLIENT_APPS_LAN_WAN_PROD]   = {
+			14, IPA_v3_0_GROUP_UL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR},
 	[IPA_3_0][IPA_CLIENT_APPS_CMD_PROD]
-			= {22, IPA_GROUP_IMM_CMD, false,
+			= {22, IPA_v3_0_GROUP_IMM_CMD, false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR},
-	[IPA_3_0][IPA_CLIENT_ODU_PROD]            = {12, IPA_GROUP_UL, true,
+	[IPA_3_0][IPA_CLIENT_ODU_PROD]            = {
+			12, IPA_v3_0_GROUP_UL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR},
-	[IPA_3_0][IPA_CLIENT_MHI_PROD]            = {0, IPA_GROUP_UL, true,
+	[IPA_3_0][IPA_CLIENT_MHI_PROD]            = {
+			0, IPA_v3_0_GROUP_UL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_PCIE},
-	[IPA_3_0][IPA_CLIENT_Q6_LAN_PROD]         = {9, IPA_GROUP_UL, false,
+	[IPA_3_0][IPA_CLIENT_Q6_LAN_PROD]         = {
+			9, IPA_v3_0_GROUP_UL, false,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR},
-	[IPA_3_0][IPA_CLIENT_Q6_WAN_PROD]         = {5, IPA_GROUP_DL,
+	[IPA_3_0][IPA_CLIENT_Q6_WAN_PROD]         = {
+			5, IPA_v3_0_GROUP_DL,
 			true, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR},
 	[IPA_3_0][IPA_CLIENT_Q6_CMD_PROD]
-			= {6, IPA_GROUP_IMM_CMD, false,
+			= {6, IPA_v3_0_GROUP_IMM_CMD, false,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR},
-	[IPA_3_0][IPA_CLIENT_Q6_DECOMP_PROD]      = {7, IPA_GROUP_Q6ZIP,
+	[IPA_3_0][IPA_CLIENT_Q6_DECOMP_PROD]      = {7, IPA_v3_0_GROUP_Q6ZIP,
 			false, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR},
-	[IPA_3_0][IPA_CLIENT_Q6_DECOMP2_PROD]     = {8, IPA_GROUP_Q6ZIP,
+	[IPA_3_0][IPA_CLIENT_Q6_DECOMP2_PROD]     = {8, IPA_v3_0_GROUP_Q6ZIP,
 			false, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR},
 	[IPA_3_0][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD]
-			= {12, IPA_GROUP_DMA, false,
+			= {12, IPA_v3_0_GROUP_DMA, false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_PCIE},
 	[IPA_3_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD]
-			= {13, IPA_GROUP_DMA, false,
+			= {13, IPA_v3_0_GROUP_DMA, false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_PCIE},
 	/* Only for test purpose */
-	[IPA_3_0][IPA_CLIENT_TEST_PROD]           = {1, IPA_GROUP_UL, true,
+	[IPA_3_0][IPA_CLIENT_TEST_PROD]           = {
+			1, IPA_v3_0_GROUP_UL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR},
-	[IPA_3_0][IPA_CLIENT_TEST1_PROD]          = {1, IPA_GROUP_UL, true,
+	[IPA_3_0][IPA_CLIENT_TEST1_PROD]          = {
+			1, IPA_v3_0_GROUP_UL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR},
-	[IPA_3_0][IPA_CLIENT_TEST2_PROD]          = {3, IPA_GROUP_UL, true,
+	[IPA_3_0][IPA_CLIENT_TEST2_PROD]          = {
+			3, IPA_v3_0_GROUP_UL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR},
-	[IPA_3_0][IPA_CLIENT_TEST3_PROD]          = {12, IPA_GROUP_UL, true,
+	[IPA_3_0][IPA_CLIENT_TEST3_PROD]          = {
+			12, IPA_v3_0_GROUP_UL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR},
-	[IPA_3_0][IPA_CLIENT_TEST4_PROD]          = {13, IPA_GROUP_UL, true,
+	[IPA_3_0][IPA_CLIENT_TEST4_PROD]          = {
+			13, IPA_v3_0_GROUP_UL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR},
 
 	[IPA_3_0][IPA_CLIENT_HSIC1_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_0][IPA_CLIENT_WLAN1_CONS]          = {25, IPA_GROUP_DL, false,
+	[IPA_3_0][IPA_CLIENT_WLAN1_CONS]          = {
+			25, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR},
 	[IPA_3_0][IPA_CLIENT_HSIC2_CONS]          = IPA_CLIENT_NOT_USED,
 	[IPA_3_0][IPA_CLIENT_USB2_CONS]           = IPA_CLIENT_NOT_USED,
-	[IPA_3_0][IPA_CLIENT_WLAN2_CONS]          = {27, IPA_GROUP_DL, false,
+	[IPA_3_0][IPA_CLIENT_WLAN2_CONS]          = {
+			27, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR},
 	[IPA_3_0][IPA_CLIENT_HSIC3_CONS]          = IPA_CLIENT_NOT_USED,
 	[IPA_3_0][IPA_CLIENT_USB3_CONS]           = IPA_CLIENT_NOT_USED,
-	[IPA_3_0][IPA_CLIENT_WLAN3_CONS]          = {28, IPA_GROUP_DL, false,
+	[IPA_3_0][IPA_CLIENT_WLAN3_CONS]          = {
+			28, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR},
 	[IPA_3_0][IPA_CLIENT_HSIC4_CONS]          = IPA_CLIENT_NOT_USED,
 	[IPA_3_0][IPA_CLIENT_USB4_CONS]           = IPA_CLIENT_NOT_USED,
-	[IPA_3_0][IPA_CLIENT_WLAN4_CONS]          = {29, IPA_GROUP_DL, false,
+	[IPA_3_0][IPA_CLIENT_WLAN4_CONS]          = {
+			29, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR},
 	[IPA_3_0][IPA_CLIENT_HSIC5_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_0][IPA_CLIENT_USB_CONS]            = {26, IPA_GROUP_DL, false,
+	[IPA_3_0][IPA_CLIENT_USB_CONS]            = {
+			26, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR},
-	[IPA_3_0][IPA_CLIENT_USB_DPL_CONS]        = {17, IPA_GROUP_DPL, false,
+	[IPA_3_0][IPA_CLIENT_USB_DPL_CONS]        = {
+			17, IPA_v3_0_GROUP_DPL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR},
 	[IPA_3_0][IPA_CLIENT_A2_EMBEDDED_CONS]    = IPA_CLIENT_NOT_USED,
 	[IPA_3_0][IPA_CLIENT_A2_TETHERED_CONS]    = IPA_CLIENT_NOT_USED,
 	[IPA_3_0][IPA_CLIENT_A5_LAN_WAN_CONS]     = IPA_CLIENT_NOT_USED,
-	[IPA_3_0][IPA_CLIENT_APPS_LAN_CONS]       = {15, IPA_GROUP_UL, false,
+	[IPA_3_0][IPA_CLIENT_APPS_LAN_CONS]       = {
+			15, IPA_v3_0_GROUP_UL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR},
-	[IPA_3_0][IPA_CLIENT_APPS_WAN_CONS]       = {16, IPA_GROUP_DL, false,
+	[IPA_3_0][IPA_CLIENT_APPS_WAN_CONS]       = {
+			16, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR},
-	[IPA_3_0][IPA_CLIENT_ODU_EMB_CONS]        = {23, IPA_GROUP_DL, false,
+	[IPA_3_0][IPA_CLIENT_ODU_EMB_CONS]        = {
+			23, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR},
 	[IPA_3_0][IPA_CLIENT_ODU_TETH_CONS]       = IPA_CLIENT_NOT_USED,
-	[IPA_3_0][IPA_CLIENT_MHI_CONS]            = {23, IPA_GROUP_DL, false,
+	[IPA_3_0][IPA_CLIENT_MHI_CONS]            = {
+			23, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE},
-	[IPA_3_0][IPA_CLIENT_Q6_LAN_CONS]         = {19, IPA_GROUP_DL, false,
+	[IPA_3_0][IPA_CLIENT_Q6_LAN_CONS]         = {
+			19, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR},
-	[IPA_3_0][IPA_CLIENT_Q6_WAN_CONS]         = {18, IPA_GROUP_UL, false,
+	[IPA_3_0][IPA_CLIENT_Q6_WAN_CONS]         = {
+			18, IPA_v3_0_GROUP_UL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR},
-	[IPA_3_0][IPA_CLIENT_Q6_DUN_CONS]         = {30, IPA_GROUP_DIAG,
+	[IPA_3_0][IPA_CLIENT_Q6_DUN_CONS]         = {
+			30, IPA_v3_0_GROUP_DIAG,
 			false, IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR},
 	[IPA_3_0][IPA_CLIENT_Q6_DECOMP_CONS]
-			= {21, IPA_GROUP_Q6ZIP, false,
+			= {21, IPA_v3_0_GROUP_Q6ZIP, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR},
 	[IPA_3_0][IPA_CLIENT_Q6_DECOMP2_CONS]
-			= {4, IPA_GROUP_Q6ZIP, false,
+			= {4, IPA_v3_0_GROUP_Q6ZIP, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR},
 	[IPA_3_0][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS]
-			= {28, IPA_GROUP_DMA, false,
+			= {28, IPA_v3_0_GROUP_DMA, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE},
 	[IPA_3_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS]
-			= {29, IPA_GROUP_DMA, false,
+			= {29, IPA_v3_0_GROUP_DMA, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE},
 	[IPA_3_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS]     = IPA_CLIENT_NOT_USED,
 	/* Only for test purpose */
-	[IPA_3_0][IPA_CLIENT_TEST_CONS]           = {26, IPA_GROUP_DL, false,
+	[IPA_3_0][IPA_CLIENT_TEST_CONS]           = {
+			26, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR},
-	[IPA_3_0][IPA_CLIENT_TEST1_CONS]          = {26, IPA_GROUP_DL, false,
+	[IPA_3_0][IPA_CLIENT_TEST1_CONS]          = {
+			26, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR},
-	[IPA_3_0][IPA_CLIENT_TEST2_CONS]          = {27, IPA_GROUP_DL, false,
+	[IPA_3_0][IPA_CLIENT_TEST2_CONS]          = {
+			27, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR},
-	[IPA_3_0][IPA_CLIENT_TEST3_CONS]          = {28, IPA_GROUP_DL, false,
+	[IPA_3_0][IPA_CLIENT_TEST3_CONS]          = {
+			28, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR},
-	[IPA_3_0][IPA_CLIENT_TEST4_CONS]          = {29, IPA_GROUP_DL, false,
+	[IPA_3_0][IPA_CLIENT_TEST4_CONS]          = {
+			29, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+
+	/* IPA_3_5_1 */
+	[IPA_3_5_1][IPA_CLIENT_HSIC1_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_WLAN1_PROD]          = {
+			7, IPA_v3_5_1_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_HSIC2_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_USB2_PROD]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_HSIC3_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_USB3_PROD]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_HSIC4_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_USB4_PROD]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_HSIC5_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_USB_PROD]            = {
+			0, IPA_v3_5_1_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_UC_USB_PROD]         = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_A5_WLAN_AMPDU_PROD]  = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_A2_EMBEDDED_PROD]    = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_A2_TETHERED_PROD]    = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_APPS_LAN_WAN_PROD]   = {
+			8, IPA_v3_5_1_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_APPS_CMD_PROD]		= {
+			5, IPA_v3_5_1_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_ODU_PROD]            = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_MHI_PROD]            = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_Q6_LAN_PROD]         = {
+			3, IPA_v3_5_1_GROUP_UL_DL,
+			true, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR },
+	[IPA_3_5_1][IPA_CLIENT_Q6_WAN_PROD]         = {
+			6, IPA_v3_5_1_GROUP_UL_DL,
+			true, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_Q6_CMD_PROD]
+			= {4, IPA_v3_5_1_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_Q6_DECOMP_PROD]      = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_Q6_DECOMP2_PROD]     = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = IPA_CLIENT_NOT_USED,
+	/* Only for test purpose */
+	[IPA_3_5_1][IPA_CLIENT_TEST_PROD]           = {
+			0, IPA_v3_5_1_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_TEST1_PROD]          = {
+			0, IPA_v3_5_1_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_TEST2_PROD]          = {
+			2, IPA_v3_5_1_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_TEST3_PROD]          = {
+			4, IPA_v3_5_1_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_TEST4_PROD]          = {
+			1, IPA_v3_5_1_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+
+	[IPA_3_5_1][IPA_CLIENT_HSIC1_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_WLAN1_CONS]          = {
+			16, IPA_v3_5_1_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_HSIC2_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_USB2_CONS]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_WLAN2_CONS]          =  {
+			18, IPA_v3_5_1_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_HSIC3_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_USB3_CONS]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_WLAN3_CONS]          =  {
+			19, IPA_v3_5_1_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_HSIC4_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_USB4_CONS]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_WLAN4_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_HSIC5_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_USB_CONS]            = {
+			17, IPA_v3_5_1_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_USB_DPL_CONS]        = {
+			11, IPA_v3_5_1_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_A2_EMBEDDED_CONS]    = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_A2_TETHERED_CONS]    = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_A5_LAN_WAN_CONS]     = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_APPS_LAN_CONS]       = {
+			9, IPA_v3_5_1_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_APPS_WAN_CONS]       = {
+			10, IPA_v3_5_1_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_ODU_EMB_CONS]        = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_ODU_TETH_CONS]       = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_MHI_CONS]            = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_Q6_LAN_CONS]         = {
+			13, IPA_v3_5_1_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_Q6_WAN_CONS]         = {
+			12, IPA_v3_5_1_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_Q6_DUN_CONS]         = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_Q6_DECOMP_CONS]		= IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_Q6_DECOMP2_CONS]		= IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = IPA_CLIENT_NOT_USED,
+	/* Only for test purpose */
+	[IPA_3_5_1][IPA_CLIENT_TEST_CONS]           = {
+			17, IPA_v3_5_1_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_TEST1_CONS]          = {
+			17, IPA_v3_5_1_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_TEST2_CONS]          = {
+			18, IPA_v3_5_1_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_TEST3_CONS]          = {
+			19, IPA_v3_5_1_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_TEST4_CONS]          = {
+			11, IPA_v3_5_1_GROUP_UL_DL,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR},
 };
 
-/* this array include information tuple:
- * {ipa_ep_num, ipa_gsi_chan_num, ipa_if_tlv, ipa_if_aos, ee}
- */
-static struct ipa_gsi_ep_config ipa_gsi_ep_info[] = {
-	{0, 0, 8, 16, 0},
-	{1, 3, 8, 16, 0},
-	{3, 5, 16, 32, 0},
-	{4, 9, 4, 4, 1},
-	{5, 0, 16, 32, 1},
-	{6, 1, 18, 28, 1},
-	{7, 2, 0, 0, 1},
-	{8, 3, 0, 0, 1},
-	{9, 4, 8, 12, 1},
-	{10, 1, 8, 16, 3},
-	{12, 9, 8, 16, 0},
-	{13, 10, 8, 16, 0},
-	{14, 11, 8, 16, 0},
-	{15, 7, 8, 12, 0},
-	{16, 8, 8, 12, 0},
-	{17, 2, 8, 12, 0},
-	{18, 5, 8, 12, 1},
-	{19, 6, 8, 12, 1},
-	{21, 8, 4, 4, 1},
-	{22, 6, 18, 28, 0},
-	{23, 1, 8, 8, 0},
-	{25, 4, 8, 8, 3},
-	{26, 12, 8, 8, 0},
-	{27, 4, 8, 8, 0},
-	{28, 13, 8, 8, 0},
-	{29, 14, 8, 8, 0},
-	{30, 7, 4, 4, 1},
-	{-1, -1, -1, -1, -1}
+enum ipa_ees {
+	IPA_EE_AP = 0,
+	IPA_EE_Q6 = 1,
+	IPA_EE_UC = 3,
+};
+
+static struct ipa_gsi_ep_config
+	ipa_gsi_ep_info[IPA_VER_MAX][IPA3_MAX_NUM_PIPES] = {
+		/* IPA_3_0 - also valid for IPA v3.1 */
+	[IPA_3_0] = {
+	/* {ipa_ep_num, ipa_gsi_chan_num, ipa_if_tlv, ipa_if_aos, ee} */
+		{0, 0, 8, 16, IPA_EE_AP},
+		{1, 3, 8, 16, IPA_EE_AP},
+		{3, 5, 16, 32, IPA_EE_AP},
+		{4, 9, 4, 4, IPA_EE_Q6},
+		{5, 0, 16, 32, IPA_EE_Q6},
+		{6, 1, 18, 28, IPA_EE_Q6},
+		{7, 2, 0, 0, IPA_EE_Q6},
+		{8, 3, 0, 0, IPA_EE_Q6},
+		{9, 4, 8, 12, IPA_EE_Q6},
+		{10, 1, 8, 16, IPA_EE_UC},
+		{12, 9, 8, 16, IPA_EE_AP},
+		{13, 10, 8, 16, IPA_EE_AP},
+		{14, 11, 8, 16, IPA_EE_AP},
+		{15, 7, 8, 12, IPA_EE_AP},
+		{16, 8, 8, 12, IPA_EE_AP},
+		{17, 2, 8, 12, IPA_EE_AP},
+		{18, 5, 8, 12, IPA_EE_Q6},
+		{19, 6, 8, 12, IPA_EE_Q6},
+		{21, 8, 4, 4, IPA_EE_Q6},
+		{22, 6, 18, 28, IPA_EE_AP},
+		{23, 1, 8, 8, IPA_EE_AP},
+		{25, 4, 8, 8, IPA_EE_UC},
+		{26, 12, 8, 8, IPA_EE_AP},
+		{27, 4, 8, 8, IPA_EE_AP},
+		{28, 13, 8, 8, IPA_EE_AP},
+		{29, 14, 8, 8, IPA_EE_AP},
+		{30, 7, 4, 4, IPA_EE_Q6},
+		{-1, -1, -1, -1, -1}
+	},
+	[IPA_3_5] = {
+	/* {ipa_ep_num, ipa_gsi_chan_num, ipa_if_tlv, ipa_if_aos, ee} */
+		{0, 7, 8, 16, IPA_EE_AP},
+		{1, 0, 8, 16, IPA_EE_UC},
+		{2, 3, 16, 32, IPA_EE_AP},
+		{3, 0, 16, 32, IPA_EE_Q6},
+		{4, 1, 20, 23, IPA_EE_Q6},
+		{5, 4, 20, 23, IPA_EE_AP},
+		{6, 4, 12, 30, IPA_EE_Q6},
+		{7, 1, 8, 16, IPA_EE_UC},
+		{8, 9, 8, 16, IPA_EE_AP},
+		{9, 5, 8, 12, IPA_EE_AP},
+		{10, 6, 8, 12, IPA_EE_AP},
+		{11, 2, 4, 6, IPA_EE_AP},
+		{12, 2, 8, 12, IPA_EE_Q6},
+		{13, 3, 8, 12, IPA_EE_Q6},
+		{14, 10, 4, 6, IPA_EE_AP},
+		{15, 2, 8, 8, IPA_EE_UC},
+		{16, 3, 8, 8, IPA_EE_UC},
+		{17, 11, 8, 8, IPA_EE_AP},
+		{18, 12, 8, 8, IPA_EE_AP},
+		{19, 13, 8, 8, IPA_EE_AP},
+		{-1, -1, -1, -1, -1}
+	},
+	[IPA_3_5_1] = {
+	/* {ipa_ep_num, ipa_gsi_chan_num, ipa_if_tlv, ipa_if_aos, ee} */
+		{0, 0, 8, 16, IPA_EE_AP},
+		{1, 0, 8, 16, IPA_EE_UC},
+		{2, 3, 16, 32, IPA_EE_AP},
+		{3, 0, 16, 32, IPA_EE_Q6},
+		{4, 1, 20, 23, IPA_EE_Q6},
+		{5, 4, 20, 23, IPA_EE_AP},
+		{6, 4, 12, 30, IPA_EE_Q6},
+		{7, 1, 8, 16, IPA_EE_UC},
+		{8, 7, 8, 16, IPA_EE_AP},
+		{9, 5, 8, 12, IPA_EE_AP},
+		{10, 6, 8, 12, IPA_EE_AP},
+		{11, 2, 4, 6, IPA_EE_AP},
+		{12, 2, 8, 12, IPA_EE_Q6},
+		{13, 3, 8, 12, IPA_EE_Q6},
+		{14, 5, 8, 8, IPA_EE_Q6},
+		{15, 2, 8, 8, IPA_EE_UC},
+		{16, 3, 8, 8, IPA_EE_UC},
+		{17, 8, 8, 8, IPA_EE_AP},
+		{18, 9, 8, 8, IPA_EE_AP},
+		{19, 10, 8, 8, IPA_EE_AP},
+		{-1, -1, -1, -1, -1}
+	},
 };
 
 static struct msm_bus_vectors ipa_init_vectors_v3_0[]  = {
@@ -882,7 +1192,7 @@ int ipa3_init_hw(void)
 
 /**
  * ipa3_get_hw_type_index() - Get HW type index which is used as the entry index
- *	into ipa3_ep_mapping[] array.
+ *	into the endpoint/resource-group related arrays.
  *
  * Return value: HW type index
  */
@@ -895,6 +1205,12 @@ u8 ipa3_get_hw_type_index(void)
 	case IPA_HW_v3_1:
 		hw_type_index = IPA_3_0;
 		break;
+	case IPA_HW_v3_5:
+		hw_type_index = IPA_3_5;
+		break;
+	case IPA_HW_v3_5_1:
+		hw_type_index = IPA_3_5_1;
+		break;
 	default:
 		IPAERR("Incorrect IPA version %d\n", ipa3_ctx->ipa_hw_type);
 		hw_type_index = IPA_3_0;
@@ -929,14 +1245,17 @@ int ipa3_get_ep_mapping(enum ipa_client_type client)
 struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info(int ipa_ep_idx)
 {
 	int i;
+	u8 hw_index;
+
+	hw_index = ipa3_get_hw_type_index();
 
 	for (i = 0; ; i++) {
-		if (ipa_gsi_ep_info[i].ipa_ep_num < 0)
+		if (ipa_gsi_ep_info[hw_index][i].ipa_ep_num < 0)
 			break;
 
-		if (ipa_gsi_ep_info[i].ipa_ep_num ==
+		if (ipa_gsi_ep_info[hw_index][i].ipa_ep_num ==
 			ipa_ep_idx)
-			return &(ipa_gsi_ep_info[i]);
+			return &(ipa_gsi_ep_info[hw_index][i]);
 	}
 
 	return NULL;
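
With the table split per HW type, a lookup now resolves the running hardware once and then scans the sentinel-terminated row. A minimal usage sketch, not part of the patch (the client chosen is just an example from the mapping above):

    /* Sketch: fetch GSI channel parameters for a mapped client */
    int ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
    struct ipa_gsi_ep_config *gsi_ep;

    if (ep_idx >= 0) {
            gsi_ep = ipa3_get_gsi_ep_info(ep_idx);
            if (gsi_ep) /* NULL if the pipe has no GSI entry */
                    IPADBG("chan=%d tlv=%d aos=%d\n",
                           gsi_ep->ipa_gsi_chan_num,
                           gsi_ep->ipa_if_tlv, gsi_ep->ipa_if_aos);
    }
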
@@ -3236,59 +3555,112 @@ bool ipa_is_modem_pipe(int pipe_idx)
 static void ipa3_write_rsrc_grp_type_reg(int group_index,
 			enum ipa_rsrc_grp_type_src n, bool src,
 			struct ipahal_reg_rsrc_grp_cfg *val) {
+	u8 hw_type_idx;
 
-	if (src) {
-		switch (group_index) {
-		case IPA_GROUP_UL:
-		case IPA_GROUP_DL:
-			ipahal_write_reg_n_fields(
-				IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
-				n, val);
-			break;
-		case IPA_GROUP_DIAG:
-		case IPA_GROUP_DMA:
-			ipahal_write_reg_n_fields(
-				IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
-				n, val);
-			break;
-		case IPA_GROUP_Q6ZIP:
-		case IPA_GROUP_UC_RX_Q:
-			ipahal_write_reg_n_fields(
-				IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n,
-				n, val);
-			break;
-		default:
-			IPAERR(
-			" Invalid source resource group,index #%d\n",
-			group_index);
-			break;
+	hw_type_idx = ipa3_get_hw_type_index();
+
+	switch (hw_type_idx) {
+	case IPA_3_0:
+		if (src) {
+			switch (group_index) {
+			case IPA_v3_0_GROUP_UL:
+			case IPA_v3_0_GROUP_DL:
+				ipahal_write_reg_n_fields(
+					IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
+					n, val);
+				break;
+			case IPA_v3_0_GROUP_DIAG:
+			case IPA_v3_0_GROUP_DMA:
+				ipahal_write_reg_n_fields(
+					IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
+					n, val);
+				break;
+			case IPA_v3_0_GROUP_Q6ZIP:
+			case IPA_v3_0_GROUP_UC_RX_Q:
+				ipahal_write_reg_n_fields(
+					IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n,
+					n, val);
+				break;
+			default:
+				IPAERR(
+				"Invalid source resource group, index #%d\n",
+				group_index);
+				break;
+			}
+		} else {
+			switch (group_index) {
+			case IPA_v3_0_GROUP_UL:
+			case IPA_v3_0_GROUP_DL:
+				ipahal_write_reg_n_fields(
+					IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
+					n, val);
+				break;
+			case IPA_v3_0_GROUP_DIAG:
+			case IPA_v3_0_GROUP_DMA:
+				ipahal_write_reg_n_fields(
+					IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
+					n, val);
+				break;
+			case IPA_v3_0_GROUP_Q6ZIP_GENERAL:
+			case IPA_v3_0_GROUP_Q6ZIP_ENGINE:
+				ipahal_write_reg_n_fields(
+					IPA_DST_RSRC_GRP_45_RSRC_TYPE_n,
+					n, val);
+				break;
+			default:
+				IPAERR(
+				"Invalid destination resource group, index #%d\n",
+				group_index);
+				break;
+			}
 		}
-	} else {
-		switch (group_index) {
-		case IPA_GROUP_UL:
-		case IPA_GROUP_DL:
-			ipahal_write_reg_n_fields(
-				IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
-				n, val);
-			break;
-		case IPA_GROUP_DIAG:
-		case IPA_GROUP_DMA:
-			ipahal_write_reg_n_fields(
-				IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
-				n, val);
-			break;
-		case IPA_GROUP_Q6ZIP_GENERAL:
-		case IPA_GROUP_Q6ZIP_ENGINE:
-			ipahal_write_reg_n_fields(
-				IPA_DST_RSRC_GRP_45_RSRC_TYPE_n,
-				n, val);
-			break;
-		default:
-			IPAERR(
-			" Invalid destination resource group,index #%d\n",
-			group_index);
-			break;
+		break;
+	case IPA_3_5_1:
+		if (src) {
+			switch (group_index) {
+			case IPA_v3_5_1_GROUP_LWA_DL:
+			case IPA_v3_5_1_GROUP_UL_DL:
+				ipahal_write_reg_n_fields(
+					IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
+					n, val);
+				break;
+			case IPA_v3_5_1_GROUP_DMA:
+			case IPA_v3_5_1_GROUP_UC_RX_Q:
+				ipahal_write_reg_n_fields(
+					IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
+					n, val);
+				break;
+			default:
+				IPAERR(
+				"Invalid source resource group, index #%d\n",
+				group_index);
+				break;
+			}
+		} else {
+			switch (group_index) {
+			case IPA_v3_5_1_GROUP_LWA_DL:
+			case IPA_v3_5_1_GROUP_UL_DL:
+				ipahal_write_reg_n_fields(
+					IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
+					n, val);
+				break;
+			case IPA_v3_5_1_GROUP_DMA:
+				ipahal_write_reg_n_fields(
+					IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
+					n, val);
+				break;
+			default:
+				IPAERR(
+				"Invalid destination resource group, index #%d\n",
+				group_index);
+				break;
+			}
 		}
+		break;
+	default:
+		IPAERR("invalid hw type\n");
+		WARN_ON(1);
+		return;
 	}
 }
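
Both per-version switches encode the same register layout: each IPA_{SRC,DST}_RSRC_GRP_xy_RSRC_TYPE_n register holds one {x,y} pair covering two adjacent group indices. A sketch of the pairing this relies on (an observation drawn from the cases above, not a replacement for the explicit switch):

    /* Adjacent group indices share one GRP register pair */
    int reg_pair = group_index >> 1; /* 0 -> GRP_01, 1 -> GRP_23, 2 -> GRP_45 */
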
 
@@ -3296,6 +3668,9 @@ static void ipa3_configure_rx_hps_clients(int depth, bool min)
 {
 	int i;
 	struct ipahal_reg_rx_hps_clients val;
+	u8 hw_type_idx;
+
+	hw_type_idx = ipa3_get_hw_type_index();
 
 	/*
 	 * depth 0 contains 4 first clients out of 6
@@ -3305,11 +3680,13 @@ static void ipa3_configure_rx_hps_clients(int depth, bool min)
 		if (min)
 			val.client_minmax[i] =
 				ipa3_rsrc_rx_grp_config
+				[hw_type_idx]
 				[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ]
 				[!depth ? i : 4 + i].min;
 		else
 			val.client_minmax[i] =
 				ipa3_rsrc_rx_grp_config
+				[hw_type_idx]
 				[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ]
 				[!depth ? i : 4 + i].max;
 	}
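
For the six-client v3.0 layout, depth 0 therefore programs clients 0..3 and depth 1 programs clients 4..5; the index expression simply offsets the second write by four. Spelled out:

    /* depth 0: i = 0..3 -> clients 0..3
     * depth 1: i = 0..1 -> clients 4..5 (only on hw with 6 RX_HPS_CMDQ clients)
     */
    int client = !depth ? i : 4 + i;
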
@@ -3328,29 +3705,62 @@ void ipa3_set_resorce_groups_min_max_limits(void)
 {
 	int i;
 	int j;
+	int src_rsrc_type_max;
+	int dst_rsrc_type_max;
+	int src_grp_idx_max;
+	int dst_grp_idx_max;
 	struct ipahal_reg_rsrc_grp_cfg val;
+	u8 hw_type_idx;
 
 	IPADBG("ENTER\n");
 	IPADBG("Assign source rsrc groups min-max limits\n");
 
-	for (i = 0; i < IPA_RSRC_GRP_TYPE_SRC_MAX; i++) {
-		for (j = 0; j < IPA_GROUP_MAX; j = j + 2) {
-			val.x_min = ipa3_rsrc_src_grp_config[i][j].min;
-			val.x_max = ipa3_rsrc_src_grp_config[i][j].max;
-			val.y_min = ipa3_rsrc_src_grp_config[i][j + 1].min;
-			val.y_max = ipa3_rsrc_src_grp_config[i][j + 1].max;
+	hw_type_idx = ipa3_get_hw_type_index();
+	switch (hw_type_idx) {
+	case IPA_3_0:
+		src_rsrc_type_max = IPA_v3_0_RSRC_GRP_TYPE_SRC_MAX;
+		dst_rsrc_type_max = IPA_v3_0_RSRC_GRP_TYPE_DST_MAX;
+		src_grp_idx_max = IPA_v3_0_GROUP_MAX;
+		dst_grp_idx_max = IPA_v3_0_GROUP_MAX;
+		break;
+	case IPA_3_5_1:
+		src_rsrc_type_max = IPA_v3_5_1_RSRC_GRP_TYPE_SRC_MAX;
+		dst_rsrc_type_max = IPA_v3_5_1_RSRC_GRP_TYPE_DST_MAX;
+		src_grp_idx_max = IPA_v3_5_1_SRC_GROUP_MAX;
+		dst_grp_idx_max = IPA_v3_5_1_DST_GROUP_MAX;
+		break;
+	default:
+		IPAERR("invalid hw type index\n");
+		WARN_ON(1);
+		return;
+	}
+
+	for (i = 0; i < src_rsrc_type_max; i++) {
+		for (j = 0; j < src_grp_idx_max; j = j + 2) {
+			val.x_min =
+			ipa3_rsrc_src_grp_config[hw_type_idx][i][j].min;
+			val.x_max =
+			ipa3_rsrc_src_grp_config[hw_type_idx][i][j].max;
+			val.y_min =
+			ipa3_rsrc_src_grp_config[hw_type_idx][i][j + 1].min;
+			val.y_max =
+			ipa3_rsrc_src_grp_config[hw_type_idx][i][j + 1].max;
 			ipa3_write_rsrc_grp_type_reg(j, i, true, &val);
 		}
 	}
 
 	IPADBG("Assign destination rsrc groups min-max limits\n");
 
-	for (i = 0; i < IPA_RSRC_GRP_TYPE_DST_MAX; i++) {
-		for (j = 0; j < IPA_GROUP_MAX; j = j + 2) {
-			val.x_min = ipa3_rsrc_dst_grp_config[i][j].min;
-			val.x_max = ipa3_rsrc_dst_grp_config[i][j].max;
-			val.y_min = ipa3_rsrc_dst_grp_config[i][j + 1].min;
-			val.y_max = ipa3_rsrc_dst_grp_config[i][j + 1].max;
+	for (i = 0; i < dst_rsrc_type_max; i++) {
+		for (j = 0; j < dst_grp_idx_max; j = j + 2) {
+			val.x_min =
+			ipa3_rsrc_dst_grp_config[hw_type_idx][i][j].min;
+			val.x_max =
+			ipa3_rsrc_dst_grp_config[hw_type_idx][i][j].max;
+			val.y_min =
+			ipa3_rsrc_dst_grp_config[hw_type_idx][i][j + 1].min;
+			val.y_max =
+			ipa3_rsrc_dst_grp_config[hw_type_idx][i][j + 1].max;
 			ipa3_write_rsrc_grp_type_reg(j, i, false, &val);
 		}
 	}
@@ -3364,9 +3774,13 @@ void ipa3_set_resorce_groups_min_max_limits(void)
 	IPADBG("Assign RX_HPS CMDQ rsrc groups min-max limits\n");
 
 	ipa3_configure_rx_hps_clients(0, true);
-	ipa3_configure_rx_hps_clients(1, true);
 	ipa3_configure_rx_hps_clients(0, false);
-	ipa3_configure_rx_hps_clients(1, false);
+
+	/* Only hw types v3.0/v3.1 have 6 RX_HPS_CMDQ clients and need depth 1 */
+	if (ipa3_ctx->ipa_hw_type <= IPA_HW_v3_1) {
+		ipa3_configure_rx_hps_clients(1, true);
+		ipa3_configure_rx_hps_clients(1, false);
+	}
 
 	IPADBG("EXIT\n");
 }
@@ -3655,3 +4069,21 @@ struct device *ipa3_get_pdev(void)
 
 	return ipa3_ctx->pdev;
 }
+
+/**
+ * ipa3_enable_dcd() - enable dynamic clock division on IPA
+ *
+ * Return value: None
+ *
+ */
+void ipa3_enable_dcd(void)
+{
+	struct ipahal_reg_idle_indication_cfg idle_indication_cfg;
+
+	/* recommended values for IPA 3.5 according to IPA HPG */
+	idle_indication_cfg.const_non_idle_enable = 0;
+	idle_indication_cfg.enter_idle_debounce_thresh = 256;
+
+	ipahal_write_reg_fields(IPA_IDLE_INDICATION_CFG,
+			&idle_indication_cfg);
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 08decd8..e297dea 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -909,6 +909,24 @@ static void ipareg_construct_tx_cfg(enum ipahal_reg_name reg,
 		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5);
 }
 
+static void ipareg_construct_idle_indication_cfg(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipahal_reg_idle_indication_cfg *idle_indication_cfg;
+
+	idle_indication_cfg = (struct ipahal_reg_idle_indication_cfg *)fields;
+
+	IPA_SETFIELD_IN_REG(*val,
+		idle_indication_cfg->enter_idle_debounce_thresh,
+		IPA_IDLE_INDICATION_CFG_ENTER_IDLE_DEBOUNCE_THRESH_SHFT_V3_5,
+		IPA_IDLE_INDICATION_CFG_ENTER_IDLE_DEBOUNCE_THRESH_BMSK_V3_5);
+
+	IPA_SETFIELD_IN_REG(*val,
+		idle_indication_cfg->const_non_idle_enable,
+		IPA_IDLE_INDICATION_CFG_CONST_NON_IDLE_ENABLE_SHFT_V3_5,
+		IPA_IDLE_INDICATION_CFG_CONST_NON_IDLE_ENABLE_BMSK_V3_5);
+}
+
 /*
  * struct ipahal_reg_obj - Register H/W information for specific IPA version
  * @construct - CB to construct register value from abstracted structure
@@ -1185,6 +1203,9 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = {
 	[IPA_HW_v3_5][IPA_SPARE_REG_2] = {
 		ipareg_construct_dummy, ipareg_parse_dummy,
 		0x00002784, 0},
+	[IPA_HW_v3_5][IPA_IDLE_INDICATION_CFG] = {
+		ipareg_construct_idle_indication_cfg, ipareg_parse_dummy,
+		0x00000220, 0},
 };
 
 /*
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
index 8fb9040..98894c3 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
@@ -83,6 +83,7 @@ enum ipahal_reg_name {
 	IPA_QSB_MAX_WRITES,
 	IPA_QSB_MAX_READS,
 	IPA_TX_CFG,
+	IPA_IDLE_INDICATION_CFG,
 	IPA_REG_MAX,
 };
 
@@ -330,6 +331,16 @@ struct ipahal_reg_tx_cfg {
 };
 
 /*
+ * struct ipahal_reg_idle_indication_cfg - IPA IDLE_INDICATION_CFG register
+ * @const_non_idle_enable: enable the asserting of the IDLE value and DCD
+ * @enter_idle_debounce_thresh:  configure the debounce threshold
+ */
+struct ipahal_reg_idle_indication_cfg {
+	u16 enter_idle_debounce_thresh;
+	bool const_non_idle_enable;
+};
+
+/*
  * ipahal_reg_name_str() - returns string that represent the register
  * @reg_name: [in] register name
  */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
index 1606a2f..342803f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
@@ -312,4 +312,10 @@ int ipahal_reg_init(enum ipa_hw_type ipa_hw_type);
 #define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5 (0x1C)
 #define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5 (2)
 
+/* IPA_IDLE_INDICATION_CFG register */
+#define IPA_IDLE_INDICATION_CFG_ENTER_IDLE_DEBOUNCE_THRESH_BMSK_V3_5 (0xffff)
+#define IPA_IDLE_INDICATION_CFG_ENTER_IDLE_DEBOUNCE_THRESH_SHFT_V3_5 (0)
+#define IPA_IDLE_INDICATION_CFG_CONST_NON_IDLE_ENABLE_BMSK_V3_5 (0x10000)
+#define IPA_IDLE_INDICATION_CFG_CONST_NON_IDLE_ENABLE_SHFT_V3_5 (16)
+
 #endif /* _IPAHAL_REG_I_H_ */
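
Given these masks and shifts, the value ipa3_enable_dcd() programs for IPA v3.5 can be packed by hand. A minimal sketch using the recommended values from that function:

    /* thresh = 256, const_non_idle = 0 (values from ipa3_enable_dcd()) */
    u32 val = 0;

    val |= (256u << 0) & 0xffff;   /* ENTER_IDLE_DEBOUNCE_THRESH */
    val |= (0u << 16) & 0x10000;   /* CONST_NON_IDLE_ENABLE */
    /* val == 0x100, written at offset 0x220 (IPA_IDLE_INDICATION_CFG) */
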
diff --git a/drivers/rtc/rtc-asm9260.c b/drivers/rtc/rtc-asm9260.c
index 18a93d3..d365349 100644
--- a/drivers/rtc/rtc-asm9260.c
+++ b/drivers/rtc/rtc-asm9260.c
@@ -327,6 +327,7 @@ static const struct of_device_id asm9260_dt_ids[] = {
 	{ .compatible = "alphascale,asm9260-rtc", },
 	{}
 };
+MODULE_DEVICE_TABLE(of, asm9260_dt_ids);
 
 static struct platform_driver asm9260_rtc_driver = {
 	.probe		= asm9260_rtc_probe,
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index dd3d598..7030d7c 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -776,7 +776,7 @@ static void cmos_do_shutdown(int rtc_irq)
 	spin_unlock_irq(&rtc_lock);
 }
 
-static void __exit cmos_do_remove(struct device *dev)
+static void cmos_do_remove(struct device *dev)
 {
 	struct cmos_rtc	*cmos = dev_get_drvdata(dev);
 	struct resource *ports;
@@ -996,8 +996,9 @@ static u32 rtc_handler(void *context)
 	struct cmos_rtc *cmos = dev_get_drvdata(dev);
 	unsigned char rtc_control = 0;
 	unsigned char rtc_intr;
+	unsigned long flags;
 
-	spin_lock_irq(&rtc_lock);
+	spin_lock_irqsave(&rtc_lock, flags);
 	if (cmos_rtc.suspend_ctrl)
 		rtc_control = CMOS_READ(RTC_CONTROL);
 	if (rtc_control & RTC_AIE) {
@@ -1006,7 +1007,7 @@ static u32 rtc_handler(void *context)
 		rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
 		rtc_update_irq(cmos->rtc, 1, rtc_intr);
 	}
-	spin_unlock_irq(&rtc_lock);
+	spin_unlock_irqrestore(&rtc_lock, flags);
 
 	pm_wakeup_event(dev, 0);
 	acpi_clear_event(ACPI_EVENT_RTC);
@@ -1129,7 +1130,7 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
 				pnp_irq(pnp, 0));
 }
 
-static void __exit cmos_pnp_remove(struct pnp_dev *pnp)
+static void cmos_pnp_remove(struct pnp_dev *pnp)
 {
 	cmos_do_remove(&pnp->dev);
 }
@@ -1161,7 +1162,7 @@ static struct pnp_driver cmos_pnp_driver = {
 	.name		= (char *) driver_name,
 	.id_table	= rtc_ids,
 	.probe		= cmos_pnp_probe,
-	.remove		= __exit_p(cmos_pnp_remove),
+	.remove		= cmos_pnp_remove,
 	.shutdown	= cmos_pnp_shutdown,
 
 	/* flag ensures resume() gets called, and stops syslog spam */
@@ -1238,7 +1239,7 @@ static int __init cmos_platform_probe(struct platform_device *pdev)
 	return cmos_do_probe(&pdev->dev, resource, irq);
 }
 
-static int __exit cmos_platform_remove(struct platform_device *pdev)
+static int cmos_platform_remove(struct platform_device *pdev)
 {
 	cmos_do_remove(&pdev->dev);
 	return 0;
@@ -1263,7 +1264,7 @@ static void cmos_platform_shutdown(struct platform_device *pdev)
 MODULE_ALIAS("platform:rtc_cmos");
 
 static struct platform_driver cmos_platform_driver = {
-	.remove		= __exit_p(cmos_platform_remove),
+	.remove		= cmos_platform_remove,
 	.shutdown	= cmos_platform_shutdown,
 	.driver = {
 		.name		= driver_name,
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index b04ea9b..51e5244 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -113,6 +113,7 @@
 /* OMAP_RTC_OSC_REG bit fields: */
 #define OMAP_RTC_OSC_32KCLK_EN		BIT(6)
 #define OMAP_RTC_OSC_SEL_32KCLK_SRC	BIT(3)
+#define OMAP_RTC_OSC_OSC32K_GZ_DISABLE	BIT(4)
 
 /* OMAP_RTC_IRQWAKEEN bit fields: */
 #define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN	BIT(1)
@@ -146,6 +147,7 @@ struct omap_rtc {
 	u8 interrupts_reg;
 	bool is_pmic_controller;
 	bool has_ext_clk;
+	bool is_suspending;
 	const struct omap_rtc_device_type *type;
 	struct pinctrl_dev *pctldev;
 };
@@ -786,8 +788,9 @@ static int omap_rtc_probe(struct platform_device *pdev)
 	 */
 	if (rtc->has_ext_clk) {
 		reg = rtc_read(rtc, OMAP_RTC_OSC_REG);
-		rtc_write(rtc, OMAP_RTC_OSC_REG,
-			  reg | OMAP_RTC_OSC_SEL_32KCLK_SRC);
+		reg &= ~OMAP_RTC_OSC_OSC32K_GZ_DISABLE;
+		reg |= OMAP_RTC_OSC_32KCLK_EN | OMAP_RTC_OSC_SEL_32KCLK_SRC;
+		rtc_writel(rtc, OMAP_RTC_OSC_REG, reg);
 	}
 
 	rtc->type->lock(rtc);
@@ -898,8 +901,7 @@ static int omap_rtc_suspend(struct device *dev)
 		rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, 0);
 	rtc->type->lock(rtc);
 
-	/* Disable the clock/module */
-	pm_runtime_put_sync(dev);
+	rtc->is_suspending = true;
 
 	return 0;
 }
@@ -908,9 +910,6 @@ static int omap_rtc_resume(struct device *dev)
 {
 	struct omap_rtc *rtc = dev_get_drvdata(dev);
 
-	/* Enable the clock/module so that we can access the registers */
-	pm_runtime_get_sync(dev);
-
 	rtc->type->unlock(rtc);
 	if (device_may_wakeup(dev))
 		disable_irq_wake(rtc->irq_alarm);
@@ -918,11 +917,34 @@ static int omap_rtc_resume(struct device *dev)
 		rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, rtc->interrupts_reg);
 	rtc->type->lock(rtc);
 
+	rtc->is_suspending = false;
+
 	return 0;
 }
 #endif
 
-static SIMPLE_DEV_PM_OPS(omap_rtc_pm_ops, omap_rtc_suspend, omap_rtc_resume);
+#ifdef CONFIG_PM
+static int omap_rtc_runtime_suspend(struct device *dev)
+{
+	struct omap_rtc *rtc = dev_get_drvdata(dev);
+
+	if (rtc->is_suspending && !rtc->has_ext_clk)
+		return -EBUSY;
+
+	return 0;
+}
+
+static int omap_rtc_runtime_resume(struct device *dev)
+{
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops omap_rtc_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(omap_rtc_suspend, omap_rtc_resume)
+	SET_RUNTIME_PM_OPS(omap_rtc_runtime_suspend,
+			   omap_rtc_runtime_resume, NULL)
+};
 
 static void omap_rtc_shutdown(struct platform_device *pdev)
 {
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 548debb..6f6c013 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -384,3 +384,52 @@
 	  communication failure can request the SHM to perform a system-wide
 	  health check. If any failures are detected during the health-check,
 	  then a subsystem restart will be triggered for the failed subsystem.
+
+config MSM_SUBSYSTEM_RESTART
+       bool "MSM Subsystem Restart"
+       help
+         This option enables the MSM subsystem restart framework.
+
+         The MSM subsystem restart framework provides support to boot,
+         shutdown, and restart subsystems with a reference counted API.
+         It also notifies userspace of transitions between these states via
+         sysfs.
+
+config MSM_PIL
+       bool "Peripheral image loading"
+       select FW_LOADER
+       default n
+       help
+         Some peripherals need to be loaded into memory before they can be
+         brought out of reset.
+
+         Say yes to support these devices.
+
+config MSM_SYSMON_GLINK_COMM
+       bool "MSM System Monitor communication support using GLINK transport"
+       depends on MSM_GLINK && MSM_SUBSYSTEM_RESTART
+       help
+         This option adds support for MSM System Monitor APIs using the GLINK
+         transport layer. The APIs provided may be used for notifying
+         subsystems within the SoC about other subsystems' power-up/down
+         state-changes.
+
+config MSM_PIL_SSR_GENERIC
+       tristate "MSM Subsystem Boot Support"
+       depends on MSM_PIL && MSM_SUBSYSTEM_RESTART
+       help
+         Support for booting and shutting down MSM Subsystem processors.
+         This driver also monitors the SMSM status bits and the watchdog
+         interrupt for the subsystem and restarts it on a watchdog bite
+         or a fatal error. Subsystems include LPASS, Venus, VPU, WCNSS and
+         BCSS.
+
+config MSM_PIL_MSS_QDSP6V5
+       tristate "MSS QDSP6v5 (Hexagon) Boot Support"
+       depends on MSM_PIL && MSM_SUBSYSTEM_RESTART
+       help
+         Support for booting and shutting down QDSP6v5 (Hexagon) processors
+         in modem subsystems. If you would like to make or receive phone
+         calls then say Y here.
+
+         If unsure, say N.
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index e51984e..25ed482 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -42,3 +42,14 @@
 obj-$(CONFIG_MSM_GLINK_PKT) += msm_glink_pkt.o
 obj-$(CONFIG_MSM_SYSTEM_HEALTH_MONITOR)	+=	system_health_monitor_v01.o
 obj-$(CONFIG_MSM_SYSTEM_HEALTH_MONITOR)	+=	system_health_monitor.o
+obj-$(CONFIG_MSM_SYSMON_GLINK_COMM) += sysmon-glink.o sysmon-qmi.o
+
+obj-$(CONFIG_MSM_PIL_SSR_GENERIC) += subsys-pil-tz.o
+obj-$(CONFIG_MSM_PIL_MSS_QDSP6V5) += pil-q6v5.o pil-msa.o pil-q6v5-mss.o
+obj-$(CONFIG_MSM_PIL)   +=      peripheral-loader.o
+
+ifdef CONFIG_MSM_SUBSYSTEM_RESTART
+       obj-y += subsystem_notif.o
+       obj-y += subsystem_restart.o
+       obj-y += ramdump.o
+endif
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
new file mode 100644
index 0000000..1e593e0
--- /dev/null
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -0,0 +1,1112 @@
+/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/elf.h>
+#include <linux/mutex.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/rwsem.h>
+#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/list_sort.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/of_gpio.h>
+#include <linux/of_address.h>
+#include <linux/dma-mapping.h>
+#include <soc/qcom/ramdump.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/secure_buffer.h>
+
+#include <linux/uaccess.h>
+#include <asm/setup.h>
+
+#include "peripheral-loader.h"
+
+#define pil_err(desc, fmt, ...)						\
+	dev_err(desc->dev, "%s: " fmt, desc->name, ##__VA_ARGS__)
+#define pil_info(desc, fmt, ...)					\
+	dev_info(desc->dev, "%s: " fmt, desc->name, ##__VA_ARGS__)
+
+#if defined(CONFIG_ARM)
+#define pil_memset_io(d, c, count) memset(d, c, count)
+#else
+#define pil_memset_io(d, c, count) memset_io(d, c, count)
+#endif
+
+#define PIL_NUM_DESC		10
+static void __iomem *pil_info_base;
+
+/**
+ * proxy_timeout - Override for proxy vote timeouts
+ * -1: Use driver-specified timeout
+ *  0: Hold proxy votes until shutdown
+ * >0: Specify a custom timeout in ms
+ */
+static int proxy_timeout_ms = -1;
+module_param(proxy_timeout_ms, int, 0644);
+
+static bool disable_timeouts;
+/**
+ * struct pil_mdt - Representation of <name>.mdt file in memory
+ * @hdr: ELF32 header
+ * @phdr: ELF32 program headers
+ */
+struct pil_mdt {
+	struct elf32_hdr hdr;
+	struct elf32_phdr phdr[];
+};
+
+/**
+ * struct pil_seg - memory map representing one segment
+ * @next: points to next segment, or NULL if last segment
+ * @paddr: physical start address of segment
+ * @sz: size of segment
+ * @filesz: size of segment on disk
+ * @num: segment number
+ * @relocated: true if segment is relocated, false otherwise
+ *
+ * Loosely based on an elf program header. Contains all necessary information
+ * to load and initialize a segment of the image in memory.
+ */
+struct pil_seg {
+	phys_addr_t paddr;
+	unsigned long sz;
+	unsigned long filesz;
+	int num;
+	struct list_head list;
+	bool relocated;
+};
+
+/**
+ * struct pil_priv - Private state for a pil_desc
+ * @proxy: work item used to run the proxy unvoting routine
+ * @ws: wakeup source to prevent suspend during pil_boot
+ * @wname: name of @ws
+ * @desc: pointer to pil_desc this is private data for
+ * @seg: list of segments sorted by physical address
+ * @entry_addr: physical address where processor starts booting at
+ * @base_addr: smallest start address among all segments that are relocatable
+ * @region_start: address where relocatable region starts or lowest address
+ * for non-relocatable images
+ * @region_end: address where relocatable region ends or highest address for
+ * non-relocatable images
+ * @region: region allocated for relocatable images
+ * @unvoted_flag: tracks whether the proxy vote has already been removed.
+ *
+ * This struct contains data for a pil_desc that should not be exposed outside
+ * of this file. This structure points to the descriptor and the descriptor
+ * points to this structure so that PIL drivers can't access the private
+ * data of a descriptor but this file can access both.
+ */
+struct pil_priv {
+	struct delayed_work proxy;
+	struct wakeup_source ws;
+	char wname[32];
+	struct pil_desc *desc;
+	struct list_head segs;
+	phys_addr_t entry_addr;
+	phys_addr_t base_addr;
+	phys_addr_t region_start;
+	phys_addr_t region_end;
+	void *region;
+	struct pil_image_info __iomem *info;
+	int id;
+	int unvoted_flag;
+	size_t region_size;
+};
+
+/**
+ * pil_do_ramdump() - Ramdump an image
+ * @desc: descriptor from pil_desc_init()
+ * @ramdump_dev: ramdump device returned from create_ramdump_device()
+ *
+ * Calls the ramdump API with a list of segments generated from the addresses
+ * that the descriptor corresponds to.
+ */
+int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev)
+{
+	struct pil_priv *priv = desc->priv;
+	struct pil_seg *seg;
+	int count = 0, ret;
+	struct ramdump_segment *ramdump_segs, *s;
+
+	list_for_each_entry(seg, &priv->segs, list)
+		count++;
+
+	ramdump_segs = kcalloc(count, sizeof(*ramdump_segs), GFP_KERNEL);
+	if (!ramdump_segs)
+		return -ENOMEM;
+
+	if (desc->subsys_vmid > 0)
+		ret = pil_assign_mem_to_linux(desc, priv->region_start,
+				(priv->region_end - priv->region_start));
+
+	s = ramdump_segs;
+	list_for_each_entry(seg, &priv->segs, list) {
+		s->address = seg->paddr;
+		s->size = seg->sz;
+		s++;
+	}
+
+	ret = do_elf_ramdump(ramdump_dev, ramdump_segs, count);
+	kfree(ramdump_segs);
+
+	if (!ret && desc->subsys_vmid > 0)
+		ret = pil_assign_mem_to_subsys(desc, priv->region_start,
+				(priv->region_end - priv->region_start));
+
+	return ret;
+}
+EXPORT_SYMBOL(pil_do_ramdump);
+
+int pil_assign_mem_to_subsys(struct pil_desc *desc, phys_addr_t addr,
+							size_t size)
+{
+	int ret;
+	int srcVM[1] = {VMID_HLOS};
+	int destVM[1] = {desc->subsys_vmid};
+	int destVMperm[1] = {PERM_READ | PERM_WRITE};
+
+	ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 1);
+	if (ret)
+		pil_err(desc, "%s: failed for %pa address of size %zx - subsys VMid %d rc:%d\n",
+				__func__, &addr, size, desc->subsys_vmid, ret);
+	return ret;
+}
+EXPORT_SYMBOL(pil_assign_mem_to_subsys);
+
+int pil_assign_mem_to_linux(struct pil_desc *desc, phys_addr_t addr,
+							size_t size)
+{
+	int ret;
+	int srcVM[1] = {desc->subsys_vmid};
+	int destVM[1] = {VMID_HLOS};
+	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
+
+	ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 1);
+	if (ret)
+		panic("%s: failed for %pa address of size %zx - subsys VMid %d rc:%d\n",
+				__func__, &addr, size, desc->subsys_vmid, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL(pil_assign_mem_to_linux);
+
+int pil_assign_mem_to_subsys_and_linux(struct pil_desc *desc,
+						phys_addr_t addr, size_t size)
+{
+	int ret;
+	int srcVM[1] = {VMID_HLOS};
+	int destVM[2] = {VMID_HLOS, desc->subsys_vmid};
+	int destVMperm[2] = {PERM_READ | PERM_WRITE, PERM_READ | PERM_WRITE};
+
+	ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 2);
+	if (ret)
+		pil_err(desc, "%s: failed for %pa address of size %zx - subsys VMid %d rc:%d\n",
+				__func__, &addr, size, desc->subsys_vmid, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL(pil_assign_mem_to_subsys_and_linux);
+
+int pil_reclaim_mem(struct pil_desc *desc, phys_addr_t addr, size_t size,
+						int VMid)
+{
+	int ret;
+	int srcVM[2] = {VMID_HLOS, desc->subsys_vmid};
+	int destVM[1] = {VMid};
+	int destVMperm[1] = {PERM_READ | PERM_WRITE};
+
+	if (VMid == VMID_HLOS)
+		destVMperm[0] = PERM_READ | PERM_WRITE | PERM_EXEC;
+
+	ret = hyp_assign_phys(addr, size, srcVM, 2, destVM, destVMperm, 1);
+	if (ret)
+		panic("%s: failed for %pa address of size %zx - subsys VMid %d. Fatal error.\n",
+				__func__, &addr, size, desc->subsys_vmid);
+
+	return ret;
+}
+EXPORT_SYMBOL(pil_reclaim_mem);
+
+/**
+ * pil_get_entry_addr() - Retrieve the entry address of a peripheral image
+ * @desc: descriptor from pil_desc_init()
+ *
+ * Returns the physical address where the image boots at or 0 if unknown.
+ */
+phys_addr_t pil_get_entry_addr(struct pil_desc *desc)
+{
+	return desc->priv ? desc->priv->entry_addr : 0;
+}
+EXPORT_SYMBOL(pil_get_entry_addr);
+
+static void __pil_proxy_unvote(struct pil_priv *priv)
+{
+	struct pil_desc *desc = priv->desc;
+
+	desc->ops->proxy_unvote(desc);
+	notify_proxy_unvote(desc->dev);
+	__pm_relax(&priv->ws);
+	module_put(desc->owner);
+
+}
+
+static void pil_proxy_unvote_work(struct work_struct *work)
+{
+	struct delayed_work *delayed = to_delayed_work(work);
+	struct pil_priv *priv = container_of(delayed, struct pil_priv, proxy);
+
+	__pil_proxy_unvote(priv);
+}
+
+static int pil_proxy_vote(struct pil_desc *desc)
+{
+	int ret = 0;
+	struct pil_priv *priv = desc->priv;
+
+	if (desc->ops->proxy_vote) {
+		__pm_stay_awake(&priv->ws);
+		ret = desc->ops->proxy_vote(desc);
+		if (ret)
+			__pm_relax(&priv->ws);
+	}
+
+	if (desc->proxy_unvote_irq)
+		enable_irq(desc->proxy_unvote_irq);
+	notify_proxy_vote(desc->dev);
+
+	return ret;
+}
+
+static void pil_proxy_unvote(struct pil_desc *desc, int immediate)
+{
+	struct pil_priv *priv = desc->priv;
+	unsigned long timeout;
+
+	if (proxy_timeout_ms == 0 && !immediate)
+		return;
+	else if (proxy_timeout_ms > 0)
+		timeout = proxy_timeout_ms;
+	else
+		timeout = desc->proxy_timeout;
+
+	if (desc->ops->proxy_unvote) {
+		if (WARN_ON(!try_module_get(desc->owner)))
+			return;
+
+		if (immediate)
+			timeout = 0;
+
+		if (!desc->proxy_unvote_irq || immediate)
+			schedule_delayed_work(&priv->proxy,
+					      msecs_to_jiffies(timeout));
+	}
+}
+
+static irqreturn_t proxy_unvote_intr_handler(int irq, void *dev_id)
+{
+	struct pil_desc *desc = dev_id;
+	struct pil_priv *priv = desc->priv;
+
+	pil_info(desc, "Power/Clock ready interrupt received\n");
+	if (!desc->priv->unvoted_flag) {
+		desc->priv->unvoted_flag = 1;
+		__pil_proxy_unvote(priv);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static bool segment_is_relocatable(const struct elf32_phdr *p)
+{
+	return !!(p->p_flags & BIT(27));
+}
+
+static phys_addr_t pil_reloc(const struct pil_priv *priv, phys_addr_t addr)
+{
+	return addr - priv->base_addr + priv->region_start;
+}
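
pil_reloc() rebases a link-time address into the region the image was actually placed in. A worked example with illustrative numbers (not taken from any real image):

    /* base_addr    = 0x80000000  (lowest relocatable link address)
     * region_start = 0x8ea00000  (where the region was allocated)
     *
     * pil_reloc(priv, 0x80001000)
     *     = 0x80001000 - 0x80000000 + 0x8ea00000
     *     = 0x8ea01000
     */
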
+
+static struct pil_seg *pil_init_seg(const struct pil_desc *desc,
+				  const struct elf32_phdr *phdr, int num)
+{
+	bool reloc = segment_is_relocatable(phdr);
+	const struct pil_priv *priv = desc->priv;
+	struct pil_seg *seg;
+
+	if (!reloc && memblock_overlaps_memory(phdr->p_paddr, phdr->p_memsz)) {
+		pil_err(desc, "Segment not relocatable, kernel memory would be overwritten [%#08lx, %#08lx)\n",
+		(unsigned long)phdr->p_paddr,
+		(unsigned long)(phdr->p_paddr + phdr->p_memsz));
+		return ERR_PTR(-EPERM);
+	}
+
+	if (phdr->p_filesz > phdr->p_memsz) {
+		pil_err(desc, "Segment %d: file size (%u) is greater than mem size (%u).\n",
+			num, phdr->p_filesz, phdr->p_memsz);
+		return ERR_PTR(-EINVAL);
+	}
+
+	seg = kmalloc(sizeof(*seg), GFP_KERNEL);
+	if (!seg)
+		return ERR_PTR(-ENOMEM);
+	seg->num = num;
+	seg->paddr = reloc ? pil_reloc(priv, phdr->p_paddr) : phdr->p_paddr;
+	seg->filesz = phdr->p_filesz;
+	seg->sz = phdr->p_memsz;
+	seg->relocated = reloc;
+	INIT_LIST_HEAD(&seg->list);
+
+	return seg;
+}
+
+#define segment_is_hash(flag) (((flag) & (0x7 << 24)) == (0x2 << 24))
+
+static int segment_is_loadable(const struct elf32_phdr *p)
+{
+	return (p->p_type == PT_LOAD) && !segment_is_hash(p->p_flags) &&
+		p->p_memsz;
+}
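
segment_is_hash() assumes the MDT convention that bits 26:24 of p_flags carry a segment-type field, with the value 0x2 marking a hash (integrity metadata) segment; such segments are skipped rather than loaded. The same test, spelled out:

    /* Equivalent to ((flag) & (0x7 << 24)) == (0x2 << 24) */
    u32 seg_type = (phdr->p_flags >> 24) & 0x7;
    bool is_hash = (seg_type == 0x2);
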
+
+static void pil_dump_segs(const struct pil_priv *priv)
+{
+	struct pil_seg *seg;
+	phys_addr_t seg_h_paddr;
+
+	list_for_each_entry(seg, &priv->segs, list) {
+		seg_h_paddr = seg->paddr + seg->sz;
+		pil_info(priv->desc, "%d: %pa %pa\n", seg->num,
+				&seg->paddr, &seg_h_paddr);
+	}
+}
+
+/*
+ * Ensure the entry address lies within the image limits and, if the image is
+ * relocatable, ensure it lies within a relocatable segment.
+ */
+static int pil_init_entry_addr(struct pil_priv *priv, const struct pil_mdt *mdt)
+{
+	struct pil_seg *seg;
+	phys_addr_t entry = mdt->hdr.e_entry;
+	bool image_relocated = priv->region;
+
+	if (image_relocated)
+		entry = pil_reloc(priv, entry);
+	priv->entry_addr = entry;
+
+	if (priv->desc->flags & PIL_SKIP_ENTRY_CHECK)
+		return 0;
+
+	list_for_each_entry(seg, &priv->segs, list) {
+		if (entry >= seg->paddr && entry < seg->paddr + seg->sz) {
+			if (!image_relocated)
+				return 0;
+			else if (seg->relocated)
+				return 0;
+		}
+	}
+	pil_err(priv->desc, "entry address %pa not within range\n", &entry);
+	pil_dump_segs(priv);
+	return -EADDRNOTAVAIL;
+}
+
+static int pil_alloc_region(struct pil_priv *priv, phys_addr_t min_addr,
+				phys_addr_t max_addr, size_t align)
+{
+	void *region;
+	size_t size = max_addr - min_addr;
+	size_t aligned_size;
+
+	/* Don't reallocate due to fragmentation concerns, just sanity check */
+	if (priv->region) {
+		if (WARN(priv->region_end - priv->region_start < size,
+			"Can't reuse PIL memory, too small\n"))
+			return -ENOMEM;
+		return 0;
+	}
+
+	if (align > SZ_4M)
+		aligned_size = ALIGN(size, SZ_4M);
+	else
+		aligned_size = ALIGN(size, SZ_1M);
+
+	priv->desc->attrs = 0;
+	priv->desc->attrs |= DMA_ATTR_SKIP_ZEROING | DMA_ATTR_NO_KERNEL_MAPPING;
+
+	region = dma_alloc_attrs(priv->desc->dev, aligned_size,
+				&priv->region_start, GFP_KERNEL,
+				priv->desc->attrs);
+
+	if (region == NULL) {
+		pil_err(priv->desc, "Failed to allocate relocatable region of size %zx\n",
+					size);
+		return -ENOMEM;
+	}
+
+	priv->region = region;
+	priv->region_end = priv->region_start + size;
+	priv->base_addr = min_addr;
+	priv->region_size = aligned_size;
+
+	return 0;
+}
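
So the backing allocation is rounded up to a 1 MB multiple, or to 4 MB when the lowest relocatable segment demands stricter alignment. A numeric illustration:

    /* size = 0x2a3000 (~2.6 MB):
     *   align <= SZ_4M: aligned_size = ALIGN(0x2a3000, SZ_1M) = 0x300000
     *   align >  SZ_4M: aligned_size = ALIGN(0x2a3000, SZ_4M) = 0x400000
     */
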
+
+static int pil_setup_region(struct pil_priv *priv, const struct pil_mdt *mdt)
+{
+	const struct elf32_phdr *phdr;
+	phys_addr_t min_addr_r, min_addr_n, max_addr_r, max_addr_n, start, end;
+	size_t align = 0;
+	int i, ret = 0;
+	bool relocatable = false;
+
+	min_addr_n = min_addr_r = (phys_addr_t)ULLONG_MAX;
+	max_addr_n = max_addr_r = 0;
+
+	/* Find the image limits */
+	for (i = 0; i < mdt->hdr.e_phnum; i++) {
+		phdr = &mdt->phdr[i];
+		if (!segment_is_loadable(phdr))
+			continue;
+
+		start = phdr->p_paddr;
+		end = start + phdr->p_memsz;
+
+		if (segment_is_relocatable(phdr)) {
+			min_addr_r = min(min_addr_r, start);
+			max_addr_r = max(max_addr_r, end);
+			/*
+			 * Lowest relocatable segment dictates alignment of
+			 * relocatable region
+			 */
+			if (min_addr_r == start)
+				align = phdr->p_align;
+			relocatable = true;
+		} else {
+			min_addr_n = min(min_addr_n, start);
+			max_addr_n = max(max_addr_n, end);
+		}
+
+	}
+
+	/*
+	 * Align the max address to the next 4K boundary to satisfy iommus and
+	 * XPUs that operate on 4K chunks.
+	 */
+	max_addr_n = ALIGN(max_addr_n, SZ_4K);
+	max_addr_r = ALIGN(max_addr_r, SZ_4K);
+
+	if (relocatable) {
+		ret = pil_alloc_region(priv, min_addr_r, max_addr_r, align);
+	} else {
+		priv->region_start = min_addr_n;
+		priv->region_end = max_addr_n;
+		priv->base_addr = min_addr_n;
+	}
+
+	if (priv->info) {
+		__iowrite32_copy(&priv->info->start, &priv->region_start,
+					sizeof(priv->region_start) / 4);
+		writel_relaxed(priv->region_end - priv->region_start,
+				&priv->info->size);
+	}
+
+	return ret;
+}
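
The limits are folded one loadable segment at a time, with relocatable and fixed segments tracked separately and the top rounded to 4 KB as the comment above explains. An illustrative fold over two relocatable segments:

    /* seg A: p_paddr = 0x1000, p_memsz = 0x2000 -> [0x1000, 0x3000)
     * seg B: p_paddr = 0x4000, p_memsz = 0x0800 -> [0x4000, 0x4800)
     *
     * min_addr_r = 0x1000
     * max_addr_r = ALIGN(0x4800, SZ_4K) = 0x5000
     * align      = seg A's p_align (lowest relocatable segment)
     */
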
+
+static int pil_cmp_seg(void *priv, struct list_head *a, struct list_head *b)
+{
+	int ret = 0;
+	struct pil_seg *seg_a = list_entry(a, struct pil_seg, list);
+	struct pil_seg *seg_b = list_entry(b, struct pil_seg, list);
+
+	if (seg_a->paddr < seg_b->paddr)
+		ret = -1;
+	else if (seg_a->paddr > seg_b->paddr)
+		ret = 1;
+
+	return ret;
+}
+
+static int pil_init_mmap(struct pil_desc *desc, const struct pil_mdt *mdt)
+{
+	struct pil_priv *priv = desc->priv;
+	const struct elf32_phdr *phdr;
+	struct pil_seg *seg;
+	int i, ret;
+
+	ret = pil_setup_region(priv, mdt);
+	if (ret)
+		return ret;
+
+
+	pil_info(desc, "loading from %pa to %pa\n", &priv->region_start,
+							&priv->region_end);
+
+	for (i = 0; i < mdt->hdr.e_phnum; i++) {
+		phdr = &mdt->phdr[i];
+		if (!segment_is_loadable(phdr))
+			continue;
+
+		seg = pil_init_seg(desc, phdr, i);
+		if (IS_ERR(seg))
+			return PTR_ERR(seg);
+
+		list_add_tail(&seg->list, &priv->segs);
+	}
+	list_sort(NULL, &priv->segs, pil_cmp_seg);
+
+	return pil_init_entry_addr(priv, mdt);
+}
+
+static void pil_release_mmap(struct pil_desc *desc)
+{
+	struct pil_priv *priv = desc->priv;
+	struct pil_seg *p, *tmp;
+	u64 zero = 0ULL;
+
+	if (priv->info) {
+		__iowrite32_copy(&priv->info->start, &zero,
+					sizeof(zero) / 4);
+		writel_relaxed(0, &priv->info->size);
+	}
+
+	list_for_each_entry_safe(p, tmp, &priv->segs, list) {
+		list_del(&p->list);
+		kfree(p);
+	}
+}
+
+#define IOMAP_SIZE SZ_1M
+
+struct pil_map_fw_info {
+	void *region;
+	unsigned long attrs;
+	phys_addr_t base_addr;
+	struct device *dev;
+};
+
+static void *map_fw_mem(phys_addr_t paddr, size_t size, void *data)
+{
+	struct pil_map_fw_info *info = data;
+
+	return dma_remap(info->dev, info->region, paddr, size,
+					info->attrs);
+}
+
+static void unmap_fw_mem(void *vaddr, size_t size, void *data)
+{
+	struct pil_map_fw_info *info = data;
+
+	dma_unremap(info->dev, vaddr, size);
+}
+
+static int pil_load_seg(struct pil_desc *desc, struct pil_seg *seg)
+{
+	int ret = 0, count;
+	phys_addr_t paddr;
+	char fw_name[30];
+	int num = seg->num;
+	const struct firmware *fw = NULL;
+	struct pil_map_fw_info map_fw_info = {
+		.attrs = desc->attrs,
+		.region = desc->priv->region,
+		.base_addr = desc->priv->region_start,
+		.dev = desc->dev,
+	};
+	void *map_data = desc->map_data ? desc->map_data : &map_fw_info;
+
+	if (seg->filesz) {
+		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d",
+				desc->fw_name, num);
+		ret = request_firmware_into_buf(&fw, fw_name, desc->dev,
+						map_data, seg->filesz);
+		if (ret < 0) {
+			pil_err(desc, "Failed to locate blob %s or blob is too big (rc:%d)\n",
+				fw_name, ret);
+			return ret;
+		}
+
+		if (ret != seg->filesz) {
+			pil_err(desc, "Blob size %u doesn't match %lu\n",
+					ret, seg->filesz);
+			return -EPERM;
+		}
+		ret = 0;
+	}
+
+	/* Zero out trailing memory */
+	paddr = seg->paddr + seg->filesz;
+	count = seg->sz - seg->filesz;
+	while (count > 0) {
+		int size;
+		u8 __iomem *buf;
+
+		size = min_t(size_t, IOMAP_SIZE, count);
+		buf = desc->map_fw_mem(paddr, size, map_data);
+		if (!buf) {
+			pil_err(desc, "Failed to map memory\n");
+			return -ENOMEM;
+		}
+		pil_memset_io(buf, 0, size);
+
+		desc->unmap_fw_mem(buf, size, map_data);
+
+		count -= size;
+		paddr += size;
+	}
+
+	if (desc->ops->verify_blob) {
+		ret = desc->ops->verify_blob(desc, seg->paddr, seg->sz);
+		if (ret)
+			pil_err(desc, "Blob%u failed verification(rc:%d)\n",
+								num, ret);
+	}
+
+	return ret;
+}
+
+static int pil_parse_devicetree(struct pil_desc *desc)
+{
+	struct device_node *ofnode = desc->dev->of_node;
+	int clk_ready = 0;
+
+	if (!ofnode)
+		return -EINVAL;
+
+	if (of_property_read_u32(ofnode, "qcom,mem-protect-id",
+					&desc->subsys_vmid))
+		pr_debug("Unable to read the addr-protect-id for %s\n",
+					desc->name);
+
+	if (desc->ops->proxy_unvote && of_find_property(ofnode,
+					"qcom,gpio-proxy-unvote",
+					NULL)) {
+		clk_ready = of_get_named_gpio(ofnode,
+				"qcom,gpio-proxy-unvote", 0);
+
+		if (clk_ready < 0) {
+			dev_dbg(desc->dev,
+				"[%s]: Error getting proxy unvoting gpio\n",
+				desc->name);
+			return clk_ready;
+		}
+
+		clk_ready = gpio_to_irq(clk_ready);
+		if (clk_ready < 0) {
+			dev_err(desc->dev,
+				"[%s]: Error getting proxy unvote IRQ\n",
+				desc->name);
+			return clk_ready;
+		}
+	}
+	desc->proxy_unvote_irq = clk_ready;
+	return 0;
+}
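+
+/*
+ * Illustrative fragment (values are hypothetical, not from a real dtsi) of
+ * the optional properties parsed above:
+ *
+ *	qcom,mem-protect-id = <0xF>;
+ *	qcom,gpio-proxy-unvote = <&pm8994_gpios 9 0>;
+ */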
+
+/* Synchronize request_firmware() with suspend */
+static DECLARE_RWSEM(pil_pm_rwsem);
+
+/**
+ * pil_boot() - Load a peripheral image into memory and boot it
+ * @desc: descriptor from pil_desc_init()
+ *
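+ * Roughly: request "<fw_name>.mdt" and validate its ELF header, set up the
+ * load region, take proxy votes, run init_image and mem_setup, stream in
+ * each "<fw_name>.bNN" segment, then call auth_and_reset. On error these
+ * steps are unwound in reverse order.
+ *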
+ * Returns 0 on success or -ERROR on failure.
+ */
+int pil_boot(struct pil_desc *desc)
+{
+	int ret;
+	char fw_name[30];
+	const struct pil_mdt *mdt;
+	const struct elf32_hdr *ehdr;
+	struct pil_seg *seg;
+	const struct firmware *fw;
+	struct pil_priv *priv = desc->priv;
+	bool mem_protect = false;
+	bool hyp_assign = false;
+
+	if (desc->shutdown_fail)
+		pil_err(desc, "Subsystem shutdown failed previously!\n");
+
+	/* Reinitialize for new image */
+	pil_release_mmap(desc);
+
+	down_read(&pil_pm_rwsem);
+	snprintf(fw_name, sizeof(fw_name), "%s.mdt", desc->fw_name);
+	ret = request_firmware(&fw, fw_name, desc->dev);
+	if (ret) {
+		pil_err(desc, "Failed to locate %s(rc:%d)\n", fw_name, ret);
+		goto out;
+	}
+
+	if (fw->size < sizeof(*ehdr)) {
+		pil_err(desc, "Not big enough to be an elf header\n");
+		ret = -EIO;
+		goto release_fw;
+	}
+
+	mdt = (const struct pil_mdt *)fw->data;
+	ehdr = &mdt->hdr;
+
+	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
+		pil_err(desc, "Not an elf header\n");
+		ret = -EIO;
+		goto release_fw;
+	}
+
+	if (ehdr->e_phnum == 0) {
+		pil_err(desc, "No loadable segments\n");
+		ret = -EIO;
+		goto release_fw;
+	}
+	if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
+	    sizeof(struct elf32_hdr) > fw->size) {
+		pil_err(desc, "Program headers not within mdt\n");
+		ret = -EIO;
+		goto release_fw;
+	}
+
+	ret = pil_init_mmap(desc, mdt);
+	if (ret)
+		goto release_fw;
+
+	desc->priv->unvoted_flag = 0;
+	ret = pil_proxy_vote(desc);
+	if (ret) {
+		pil_err(desc, "Failed to proxy vote(rc:%d)\n", ret);
+		goto release_fw;
+	}
+
+	if (desc->ops->init_image)
+		ret = desc->ops->init_image(desc, fw->data, fw->size);
+	if (ret) {
+		pil_err(desc, "Initializing image failed(rc:%d)\n", ret);
+		goto err_boot;
+	}
+
+	if (desc->ops->mem_setup)
+		ret = desc->ops->mem_setup(desc, priv->region_start,
+				priv->region_end - priv->region_start);
+	if (ret) {
+		pil_err(desc, "Memory setup error(rc:%d)\n", ret);
+		goto err_deinit_image;
+	}
+
+	if (desc->subsys_vmid > 0) {
+		/*
+		 * In case of modem SSR, memory has to be assigned back to
+		 * Linux; this is not needed after a cold boot since Linux
+		 * already owns it. For secure boot devices, modem memory
+		 * also has to be released after the MBA is booted.
+		 */
+		if (desc->modem_ssr) {
+			ret = pil_assign_mem_to_linux(desc, priv->region_start,
+				(priv->region_end - priv->region_start));
+			if (ret)
+				pil_err(desc, "Failed to assign to linux, ret- %d\n",
+								ret);
+		}
+		ret = pil_assign_mem_to_subsys_and_linux(desc,
+				priv->region_start,
+				(priv->region_end - priv->region_start));
+		if (ret) {
+			pil_err(desc, "Failed to assign memory, ret - %d\n",
+								ret);
+			goto err_deinit_image;
+		}
+		hyp_assign = true;
+	}
+
+	list_for_each_entry(seg, &desc->priv->segs, list) {
+		ret = pil_load_seg(desc, seg);
+		if (ret)
+			goto err_deinit_image;
+	}
+
+	if (desc->subsys_vmid > 0) {
+		ret =  pil_reclaim_mem(desc, priv->region_start,
+				(priv->region_end - priv->region_start),
+				desc->subsys_vmid);
+		if (ret) {
+			pil_err(desc, "Failed to assign %s memory, ret - %d\n",
+							desc->name, ret);
+			goto err_deinit_image;
+		}
+		hyp_assign = false;
+	}
+
+	ret = desc->ops->auth_and_reset(desc);
+	if (ret) {
+		pil_err(desc, "Failed to bring out of reset(rc:%d)\n", ret);
+		goto err_auth_and_reset;
+	}
+	pil_info(desc, "Brought out of reset\n");
+	desc->modem_ssr = false;
+err_auth_and_reset:
+	if (ret && desc->subsys_vmid > 0) {
+		pil_assign_mem_to_linux(desc, priv->region_start,
+				(priv->region_end - priv->region_start));
+		mem_protect = true;
+	}
+err_deinit_image:
+	if (ret && desc->ops->deinit_image)
+		desc->ops->deinit_image(desc);
+err_boot:
+	if (ret && desc->proxy_unvote_irq)
+		disable_irq(desc->proxy_unvote_irq);
+	pil_proxy_unvote(desc, ret);
+release_fw:
+	release_firmware(fw);
+out:
+	up_read(&pil_pm_rwsem);
+	if (ret) {
+		if (priv->region) {
+			if (desc->subsys_vmid > 0 && !mem_protect &&
+					hyp_assign) {
+				pil_reclaim_mem(desc, priv->region_start,
+					(priv->region_end -
+						priv->region_start),
+					VMID_HLOS);
+			}
+			dma_free_attrs(desc->dev, priv->region_size,
+					priv->region, priv->region_start,
+					desc->attrs);
+			priv->region = NULL;
+		}
+		pil_release_mmap(desc);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(pil_boot);
+
+/**
+ * pil_shutdown() - Shutdown a peripheral
+ * @desc: descriptor from pil_desc_init()
+ */
+void pil_shutdown(struct pil_desc *desc)
+{
+	struct pil_priv *priv = desc->priv;
+
+	if (desc->ops->shutdown) {
+		if (desc->ops->shutdown(desc))
+			desc->shutdown_fail = true;
+		else
+			desc->shutdown_fail = false;
+	}
+
+	if (desc->proxy_unvote_irq) {
+		disable_irq(desc->proxy_unvote_irq);
+		if (!desc->priv->unvoted_flag)
+			pil_proxy_unvote(desc, 1);
+	} else if (!proxy_timeout_ms)
+		pil_proxy_unvote(desc, 1);
+	else
+		flush_delayed_work(&priv->proxy);
+	desc->modem_ssr = true;
+}
+EXPORT_SYMBOL(pil_shutdown);
+
+/**
+ * pil_free_memory() - Free memory resources associated with a peripheral
+ * @desc: descriptor from pil_desc_init()
+ */
+void pil_free_memory(struct pil_desc *desc)
+{
+	struct pil_priv *priv = desc->priv;
+
+	if (priv->region) {
+		if (desc->subsys_vmid > 0)
+			pil_assign_mem_to_linux(desc, priv->region_start,
+				(priv->region_end - priv->region_start));
+		dma_free_attrs(desc->dev, priv->region_size,
+				priv->region, priv->region_start, desc->attrs);
+		priv->region = NULL;
+	}
+}
+EXPORT_SYMBOL(pil_free_memory);
+
+static DEFINE_IDA(pil_ida);
+
+bool is_timeout_disabled(void)
+{
+	return disable_timeouts;
+}
+/**
+ * pil_desc_init() - Initialize a pil descriptor
+ * @desc: descriptor to initialize
+ *
+ * Initialize a pil descriptor for use by other pil functions. This function
+ * must be called before calling pil_boot() or pil_shutdown().
+ *
+ * Returns 0 for success and -ERROR on failure.
+ */
+int pil_desc_init(struct pil_desc *desc)
+{
+	struct pil_priv *priv;
+	int ret;
+	void __iomem *addr;
+	char buf[sizeof(priv->info->name)];
+
+	if (WARN(desc->ops->proxy_unvote && !desc->ops->proxy_vote,
+				"Invalid proxy voting. Ignoring\n"))
+		((struct pil_reset_ops *)desc->ops)->proxy_unvote = NULL;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	desc->priv = priv;
+	priv->desc = desc;
+
+	priv->id = ret = ida_simple_get(&pil_ida, 0, PIL_NUM_DESC, GFP_KERNEL);
+	if (priv->id < 0)
+		goto err;
+
+	if (pil_info_base) {
+		addr = pil_info_base + sizeof(struct pil_image_info) * priv->id;
+		priv->info = (struct pil_image_info __iomem *)addr;
+
+		strlcpy(buf, desc->name, sizeof(buf));
+		__iowrite32_copy(priv->info->name, buf, sizeof(buf) / 4);
+	}
+
+	ret = pil_parse_devicetree(desc);
+	if (ret)
+		goto err_parse_dt;
+
+	/* Ignore users who don't make any sense */
+	WARN(desc->ops->proxy_unvote && desc->proxy_unvote_irq == 0
+		 && !desc->proxy_timeout,
+		 "Invalid proxy unvote callback or a proxy timeout of 0 was specified or no proxy unvote IRQ was specified.\n");
+
+	if (desc->proxy_unvote_irq) {
+		ret = request_threaded_irq(desc->proxy_unvote_irq,
+				  NULL,
+				  proxy_unvote_intr_handler,
+				  IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+				  desc->name, desc);
+		if (ret < 0) {
+			dev_err(desc->dev,
+				"Unable to request proxy unvote IRQ: %d\n",
+				ret);
+			goto err_parse_dt;
+		}
+		disable_irq(desc->proxy_unvote_irq);
+	}
+
+	snprintf(priv->wname, sizeof(priv->wname), "pil-%s", desc->name);
+	wakeup_source_init(&priv->ws, priv->wname);
+	INIT_DELAYED_WORK(&priv->proxy, pil_proxy_unvote_work);
+	INIT_LIST_HEAD(&priv->segs);
+
+	/* Make sure mapping functions are set. */
+	if (!desc->map_fw_mem)
+		desc->map_fw_mem = map_fw_mem;
+
+	if (!desc->unmap_fw_mem)
+		desc->unmap_fw_mem = unmap_fw_mem;
+
+	return 0;
+err_parse_dt:
+	ida_simple_remove(&pil_ida, priv->id);
+err:
+	kfree(priv);
+	return ret;
+}
+EXPORT_SYMBOL(pil_desc_init);
+
+/**
+ * pil_desc_release() - Release a pil descriptor
+ * @desc: descriptor to free
+ */
+void pil_desc_release(struct pil_desc *desc)
+{
+	struct pil_priv *priv = desc->priv;
+
+	if (priv) {
+		ida_simple_remove(&pil_ida, priv->id);
+		flush_delayed_work(&priv->proxy);
+		wakeup_source_trash(&priv->ws);
+	}
+	desc->priv = NULL;
+	kfree(priv);
+}
+EXPORT_SYMBOL(pil_desc_release);
+
+static int pil_pm_notify(struct notifier_block *b, unsigned long event, void *p)
+{
+	switch (event) {
+	case PM_SUSPEND_PREPARE:
+		down_write(&pil_pm_rwsem);
+		break;
+	case PM_POST_SUSPEND:
+		up_write(&pil_pm_rwsem);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block pil_pm_notifier = {
+	.notifier_call = pil_pm_notify,
+};
+
+static int __init msm_pil_init(void)
+{
+	struct device_node *np;
+	struct resource res;
+	int i;
+
+	np = of_find_compatible_node(NULL, NULL, "qcom,msm-imem-pil");
+	if (!np) {
+		pr_warn("pil: failed to find qcom,msm-imem-pil node\n");
+		goto out;
+	}
+	if (of_address_to_resource(np, 0, &res)) {
+		pr_warn("pil: address to resource on imem region failed\n");
+		goto out;
+	}
+	pil_info_base = ioremap(res.start, resource_size(&res));
+	if (!pil_info_base) {
+		pr_warn("pil: could not map imem region\n");
+		goto out;
+	}
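+	/*
+	 * 0x53444247 is ASCII 'S','D','B','G'; the cookie is presumably
+	 * written into IMEM by a debugger or bootloader to request that
+	 * PIL boot timeouts be disabled.
+	 */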
+	if (__raw_readl(pil_info_base) == 0x53444247) {
+		pr_info("pil: pil-imem set to disable pil timeouts\n");
+		disable_timeouts = true;
+	}
+	for (i = 0; i < resource_size(&res)/sizeof(u32); i++)
+		writel_relaxed(0, pil_info_base + (i * sizeof(u32)));
+
+out:
+	return register_pm_notifier(&pil_pm_notifier);
+}
+device_initcall(msm_pil_init);
+
+static void __exit msm_pil_exit(void)
+{
+	unregister_pm_notifier(&pil_pm_notifier);
+	if (pil_info_base)
+		iounmap(pil_info_base);
+}
+module_exit(msm_pil_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Load peripheral images and bring peripherals out of reset");
diff --git a/drivers/soc/qcom/peripheral-loader.h b/drivers/soc/qcom/peripheral-loader.h
new file mode 100644
index 0000000..752a6ce
--- /dev/null
+++ b/drivers/soc/qcom/peripheral-loader.h
@@ -0,0 +1,151 @@
+/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MSM_PERIPHERAL_LOADER_H
+#define __MSM_PERIPHERAL_LOADER_H
+
+struct device;
+struct module;
+struct pil_priv;
+
+/**
+ * struct pil_desc - PIL descriptor
+ * @name: string used for pil_get()
+ * @fw_name: firmware name
+ * @dev: parent device
+ * @ops: callback functions
+ * @owner: module the descriptor belongs to
+ * @proxy_timeout: delay in ms until proxy vote is removed
+ * @flags: bitfield for image flags
+ * @priv: DON'T USE - internal only
+ * @attrs: DMA attributes to be used during dma allocation.
+ * @proxy_unvote_irq: IRQ to trigger a proxy unvote. proxy_timeout
+ * is ignored if this is set.
+ * @map_fw_mem: Custom function used to map physical address space to virtual.
+ * This defaults to ioremap if not specified.
+ * @unmap_fw_mem: Custom function used to undo mapping by map_fw_mem.
+ * This defaults to iounmap if not specified.
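+ * @map_data: optional private pointer handed to map_fw_mem/unmap_fw_mem.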
+ * @shutdown_fail: Set if PIL op for shutting down subsystem fails.
+ * @modem_ssr: true if modem is restarting, false if booting for first time.
+ * @subsys_vmid: memprot id for the subsystem.
+ */
+struct pil_desc {
+	const char *name;
+	const char *fw_name;
+	struct device *dev;
+	const struct pil_reset_ops *ops;
+	struct module *owner;
+	unsigned long proxy_timeout;
+	unsigned long flags;
+#define PIL_SKIP_ENTRY_CHECK	BIT(0)
+	struct pil_priv *priv;
+	unsigned long attrs;
+	unsigned int proxy_unvote_irq;
+	void * (*map_fw_mem)(phys_addr_t phys, size_t size, void *data);
+	void (*unmap_fw_mem)(void *virt, size_t size, void *data);
+	void *map_data;
+	bool shutdown_fail;
+	bool modem_ssr;
+	u32 subsys_vmid;
+};
+
+/**
+ * struct pil_image_info - info in IMEM about image and where it is loaded
+ * @name: name of image (may or may not be NULL terminated)
+ * @start: indicates physical address where image starts (little endian)
+ * @size: size of image (little endian)
+ */
+struct pil_image_info {
+	char name[8];
+	__le64 start;
+	__le32 size;
+} __attribute__((__packed__));
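+
+/*
+ * One pil_image_info slot per descriptor lives in IMEM; pil_desc_init()
+ * indexes the slot array by the descriptor's IDA-allocated id.
+ */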
+
+/**
+ * struct pil_reset_ops - PIL operations
+ * @init_image: prepare an image for authentication
+ * @mem_setup: prepare the image memory region
+ * @verify_blob: authenticate a program segment, called once for each loadable
+ *		 program segment (optional)
+ * @proxy_vote: make proxy votes before auth_and_reset (optional)
+ * @auth_and_reset: boot the processor
+ * @proxy_unvote: remove any proxy votes (optional)
+ * @deinit_image: restore actions performed in init_image if necessary
+ * @shutdown: shutdown the processor
+ */
+struct pil_reset_ops {
+	int (*init_image)(struct pil_desc *pil, const u8 *metadata,
+			  size_t size);
+	int (*mem_setup)(struct pil_desc *pil, phys_addr_t addr, size_t size);
+	int (*verify_blob)(struct pil_desc *pil, phys_addr_t phy_addr,
+			   size_t size);
+	int (*proxy_vote)(struct pil_desc *pil);
+	int (*auth_and_reset)(struct pil_desc *pil);
+	void (*proxy_unvote)(struct pil_desc *pil);
+	int (*deinit_image)(struct pil_desc *pil);
+	int (*shutdown)(struct pil_desc *pil);
+};
+
+#ifdef CONFIG_MSM_PIL
+extern int pil_desc_init(struct pil_desc *desc);
+extern int pil_boot(struct pil_desc *desc);
+extern void pil_shutdown(struct pil_desc *desc);
+extern void pil_free_memory(struct pil_desc *desc);
+extern void pil_desc_release(struct pil_desc *desc);
+extern phys_addr_t pil_get_entry_addr(struct pil_desc *desc);
+extern int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev);
+extern int pil_assign_mem_to_subsys(struct pil_desc *desc, phys_addr_t addr,
+						size_t size);
+extern int pil_assign_mem_to_linux(struct pil_desc *desc, phys_addr_t addr,
+						size_t size);
+extern int pil_assign_mem_to_subsys_and_linux(struct pil_desc *desc,
+						phys_addr_t addr, size_t size);
+extern int pil_reclaim_mem(struct pil_desc *desc, phys_addr_t addr, size_t size,
+						int VMid);
+extern bool is_timeout_disabled(void);
+#else
+static inline int pil_desc_init(struct pil_desc *desc) { return 0; }
+static inline int pil_boot(struct pil_desc *desc) { return 0; }
+static inline void pil_shutdown(struct pil_desc *desc) { }
+static inline void pil_free_memory(struct pil_desc *desc) { }
+static inline void pil_desc_release(struct pil_desc *desc) { }
+static inline phys_addr_t pil_get_entry_addr(struct pil_desc *desc)
+{
+	return 0;
+}
+static inline int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev)
+{
+	return 0;
+}
+static inline int pil_assign_mem_to_subsys(struct pil_desc *desc,
+						phys_addr_t addr, size_t size)
+{
+	return 0;
+}
+static inline int pil_assign_mem_to_linux(struct pil_desc *desc,
+						phys_addr_t addr, size_t size)
+{
+	return 0;
+}
+static inline int pil_assign_mem_to_subsys_and_linux(struct pil_desc *desc,
+						phys_addr_t addr, size_t size)
+{
+	return 0;
+}
+static inline int pil_reclaim_mem(struct pil_desc *desc, phys_addr_t addr,
+					size_t size, int VMid)
+{
+	return 0;
+}
+static inline bool is_timeout_disabled(void) { return false; }
+#endif
+
+#endif
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
new file mode 100644
index 0000000..ffe72e6
--- /dev/null
+++ b/drivers/soc/qcom/pil-msa.c
@@ -0,0 +1,847 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/secure_buffer.h>
+
+#include "peripheral-loader.h"
+#include "pil-q6v5.h"
+#include "pil-msa.h"
+
+/* Q6 Register Offsets */
+#define QDSP6SS_RST_EVB			0x010
+#define QDSP6SS_DBG_CFG			0x018
+
+/* AXI Halting Registers */
+#define MSS_Q6_HALT_BASE		0x180
+#define MSS_MODEM_HALT_BASE		0x200
+#define MSS_NC_HALT_BASE		0x280
+
+/* RMB Status Register Values */
+#define STATUS_PBL_SUCCESS		0x1
+#define STATUS_XPU_UNLOCKED		0x1
+#define STATUS_XPU_UNLOCKED_SCRIBBLED	0x2
+
+/* PBL/MBA interface registers */
+#define RMB_MBA_IMAGE			0x00
+#define RMB_PBL_STATUS			0x04
+#define RMB_MBA_COMMAND			0x08
+#define RMB_MBA_STATUS			0x0C
+#define RMB_PMI_META_DATA		0x10
+#define RMB_PMI_CODE_START		0x14
+#define RMB_PMI_CODE_LENGTH		0x18
+#define RMB_PROTOCOL_VERSION		0x1C
+#define RMB_MBA_DEBUG_INFORMATION	0x20
+
+#define POLL_INTERVAL_US		50
+
+#define CMD_META_DATA_READY		0x1
+#define CMD_LOAD_READY			0x2
+#define CMD_PILFAIL_NFY_MBA		0xffffdead
+
+#define STATUS_META_DATA_AUTH_SUCCESS	0x3
+#define STATUS_AUTH_COMPLETE		0x4
+#define STATUS_MBA_UNLOCKED		0x6
+
+/* External BHS */
+#define EXTERNAL_BHS_ON			BIT(0)
+#define EXTERNAL_BHS_STATUS		BIT(4)
+#define BHS_TIMEOUT_US			50
+
+#define MSS_RESTART_PARAM_ID		0x2
+#define MSS_RESTART_ID			0xA
+
+#define MSS_MAGIC			0XAABADEAD
+enum scm_cmd {
+	PAS_MEM_SETUP_CMD = 2,
+};
+
+static int pbl_mba_boot_timeout_ms = 1000;
+module_param(pbl_mba_boot_timeout_ms, int, 0644);
+
+static int modem_auth_timeout_ms = 10000;
+module_param(modem_auth_timeout_ms, int, 0644);
+
+/* If set to 0xAABADEAD, MBA failures trigger a kernel panic */
+static uint modem_trigger_panic;
+module_param(modem_trigger_panic, uint, 0644);
+
+/* To set the modem debug cookie in DBG_CFG register for debugging */
+static uint modem_dbg_cfg;
+module_param(modem_dbg_cfg, uint, 0644);
+
+static void modem_log_rmb_regs(void __iomem *base)
+{
+	pr_err("RMB_MBA_IMAGE: %08x\n", readl_relaxed(base + RMB_MBA_IMAGE));
+	pr_err("RMB_PBL_STATUS: %08x\n", readl_relaxed(base + RMB_PBL_STATUS));
+	pr_err("RMB_MBA_COMMAND: %08x\n",
+				readl_relaxed(base + RMB_MBA_COMMAND));
+	pr_err("RMB_MBA_STATUS: %08x\n", readl_relaxed(base + RMB_MBA_STATUS));
+	pr_err("RMB_PMI_META_DATA: %08x\n",
+				readl_relaxed(base + RMB_PMI_META_DATA));
+	pr_err("RMB_PMI_CODE_START: %08x\n",
+				readl_relaxed(base + RMB_PMI_CODE_START));
+	pr_err("RMB_PMI_CODE_LENGTH: %08x\n",
+				readl_relaxed(base + RMB_PMI_CODE_LENGTH));
+	pr_err("RMB_PROTOCOL_VERSION: %08x\n",
+				readl_relaxed(base + RMB_PROTOCOL_VERSION));
+	pr_err("RMB_MBA_DEBUG_INFORMATION: %08x\n",
+			readl_relaxed(base + RMB_MBA_DEBUG_INFORMATION));
+
+	if (modem_trigger_panic == MSS_MAGIC)
+		panic("%s: System ramdump is needed!!!\n", __func__);
+}
+
+static int pil_mss_power_up(struct q6v5_data *drv)
+{
+	int ret = 0;
+	u32 regval;
+
+	if (drv->vreg) {
+		ret = regulator_enable(drv->vreg);
+		if (ret)
+			dev_err(drv->desc.dev, "Failed to enable modem regulator(rc:%d)\n",
+									ret);
+	}
+
+	if (drv->cxrail_bhs) {
+		regval = readl_relaxed(drv->cxrail_bhs);
+		regval |= EXTERNAL_BHS_ON;
+		writel_relaxed(regval, drv->cxrail_bhs);
+
+		ret = readl_poll_timeout(drv->cxrail_bhs, regval,
+			regval & EXTERNAL_BHS_STATUS, 1, BHS_TIMEOUT_US);
+	}
+
+	return ret;
+}
+
+static int pil_mss_power_down(struct q6v5_data *drv)
+{
+	u32 regval;
+
+	if (drv->cxrail_bhs) {
+		regval = readl_relaxed(drv->cxrail_bhs);
+		regval &= ~EXTERNAL_BHS_ON;
+		writel_relaxed(regval, drv->cxrail_bhs);
+	}
+
+	if (drv->vreg)
+		return regulator_disable(drv->vreg);
+
+	return 0;
+}
+
+static int pil_mss_enable_clks(struct q6v5_data *drv)
+{
+	int ret;
+
+	ret = clk_prepare_enable(drv->ahb_clk);
+	if (ret)
+		return ret;
+	ret = clk_prepare_enable(drv->axi_clk);
+	if (ret)
+		goto err_axi_clk;
+	ret = clk_prepare_enable(drv->rom_clk);
+	if (ret)
+		goto err_rom_clk;
+	ret = clk_prepare_enable(drv->gpll0_mss_clk);
+	if (ret)
+		goto err_gpll0_mss_clk;
+	ret = clk_prepare_enable(drv->snoc_axi_clk);
+	if (ret)
+		goto err_snoc_axi_clk;
+	ret = clk_prepare_enable(drv->mnoc_axi_clk);
+	if (ret)
+		goto err_mnoc_axi_clk;
+	return 0;
+	/* Unwind in reverse order; the clock that failed was never enabled */
+err_mnoc_axi_clk:
+	clk_disable_unprepare(drv->snoc_axi_clk);
+err_snoc_axi_clk:
+	clk_disable_unprepare(drv->gpll0_mss_clk);
+err_gpll0_mss_clk:
+	clk_disable_unprepare(drv->rom_clk);
+err_rom_clk:
+	clk_disable_unprepare(drv->axi_clk);
+err_axi_clk:
+	clk_disable_unprepare(drv->ahb_clk);
+	return ret;
+}
+
+static void pil_mss_disable_clks(struct q6v5_data *drv)
+{
+	clk_disable_unprepare(drv->mnoc_axi_clk);
+	clk_disable_unprepare(drv->snoc_axi_clk);
+	clk_disable_unprepare(drv->gpll0_mss_clk);
+	clk_disable_unprepare(drv->rom_clk);
+	clk_disable_unprepare(drv->axi_clk);
+	if (!drv->ahb_clk_vote)
+		clk_disable_unprepare(drv->ahb_clk);
+}
+
+static int pil_mss_restart_reg(struct q6v5_data *drv, u32 mss_restart)
+{
+	int ret = 0;
+	int scm_ret = 0;
+	struct scm_desc desc = {0};
+
+	desc.args[0] = mss_restart;
+	desc.args[1] = 0;
+	desc.arginfo = SCM_ARGS(2);
+
+	if (drv->restart_reg && !drv->restart_reg_sec) {
+		writel_relaxed(mss_restart, drv->restart_reg);
+		mb();
+		udelay(2);
+	} else if (drv->restart_reg_sec) {
+		if (!is_scm_armv8()) {
+			ret = scm_call(SCM_SVC_PIL, MSS_RESTART_ID,
+					&mss_restart, sizeof(mss_restart),
+					&scm_ret, sizeof(scm_ret));
+		} else {
+			ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
+						MSS_RESTART_ID), &desc);
+			scm_ret = desc.ret[0];
+		}
+		if (ret || scm_ret)
+			pr_err("Secure MSS restart failed\n");
+	}
+
+	return ret;
+}
+
+static int pil_msa_wait_for_mba_ready(struct q6v5_data *drv)
+{
+	struct device *dev = drv->desc.dev;
+	int ret;
+	u32 status;
+	u64 val = is_timeout_disabled() ? 0 : pbl_mba_boot_timeout_ms * 1000;
+
+	/* Wait for PBL completion. */
+	ret = readl_poll_timeout(drv->rmb_base + RMB_PBL_STATUS, status,
+				 status != 0, POLL_INTERVAL_US, val);
+	if (ret) {
+		dev_err(dev, "PBL boot timed out (rc:%d)\n", ret);
+		return ret;
+	}
+	if (status != STATUS_PBL_SUCCESS) {
+		dev_err(dev, "PBL returned unexpected status %d\n", status);
+		return -EINVAL;
+	}
+
+	/* Wait for MBA completion. */
+	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+				status != 0, POLL_INTERVAL_US, val);
+	if (ret) {
+		dev_err(dev, "MBA boot timed out (rc:%d)\n", ret);
+		return ret;
+	}
+	if (status != STATUS_XPU_UNLOCKED &&
+	    status != STATUS_XPU_UNLOCKED_SCRIBBLED) {
+		dev_err(dev, "MBA returned unexpected status %d\n", status);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int pil_mss_shutdown(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	int ret = 0;
+
+	if (drv->axi_halt_base) {
+		pil_q6v5_halt_axi_port(pil,
+			drv->axi_halt_base + MSS_Q6_HALT_BASE);
+		pil_q6v5_halt_axi_port(pil,
+			drv->axi_halt_base + MSS_MODEM_HALT_BASE);
+		pil_q6v5_halt_axi_port(pil,
+			drv->axi_halt_base + MSS_NC_HALT_BASE);
+	}
+
+	if (drv->axi_halt_q6)
+		pil_q6v5_halt_axi_port(pil, drv->axi_halt_q6);
+	if (drv->axi_halt_mss)
+		pil_q6v5_halt_axi_port(pil, drv->axi_halt_mss);
+	if (drv->axi_halt_nc)
+		pil_q6v5_halt_axi_port(pil, drv->axi_halt_nc);
+
+	/*
+	 * Software workaround to avoid high MX current during LPASS/MSS
+	 * restart.
+	 */
+	if (drv->mx_spike_wa && drv->ahb_clk_vote) {
+		ret = clk_prepare_enable(drv->ahb_clk);
+		if (!ret)
+			assert_clamps(pil);
+		else
+			dev_err(pil->dev, "error turning ON AHB clock(rc:%d)\n",
+									ret);
+	}
+
+	ret = pil_mss_restart_reg(drv, 1);
+
+	if (drv->is_booted) {
+		pil_mss_disable_clks(drv);
+		pil_mss_power_down(drv);
+		drv->is_booted = false;
+	}
+
+	return ret;
+}
+
+int __pil_mss_deinit_image(struct pil_desc *pil, bool err_path)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
+	int ret = 0;
+	s32 status;
+	u64 val = is_timeout_disabled() ? 0 : pbl_mba_boot_timeout_ms * 1000;
+
+	if (err_path) {
+		writel_relaxed(CMD_PILFAIL_NFY_MBA,
+				drv->rmb_base + RMB_MBA_COMMAND);
+		ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+				status == STATUS_MBA_UNLOCKED || status < 0,
+				POLL_INTERVAL_US, val);
+		if (ret)
+			dev_err(pil->dev, "MBA region unlock timed out(rc:%d)\n",
+									ret);
+		else if (status < 0)
+			dev_err(pil->dev, "MBA unlock returned err status: %d\n",
+						status);
+	}
+
+	ret = pil_mss_shutdown(pil);
+
+	if (q6_drv->ahb_clk_vote)
+		clk_disable_unprepare(q6_drv->ahb_clk);
+
+	/*
+	 * In case of any failure where reclaiming the MBA and DP memory
+	 * could not happen, free the memory here.
+	 */
+	if (drv->q6->mba_dp_virt) {
+		if (pil->subsys_vmid > 0)
+			pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
+						drv->q6->mba_dp_size);
+		dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_dp_size,
+				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
+				drv->attrs_dma);
+		drv->q6->mba_dp_virt = NULL;
+	}
+
+	return ret;
+}
+
+int pil_mss_deinit_image(struct pil_desc *pil)
+{
+	return __pil_mss_deinit_image(pil, true);
+}
+
+int pil_mss_make_proxy_votes(struct pil_desc *pil)
+{
+	int ret;
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	int uv = 0;
+
+	ret = of_property_read_u32(pil->dev->of_node, "vdd_mx-uV", &uv);
+	if (ret) {
+		dev_err(pil->dev, "missing vdd_mx-uV property(rc:%d)\n", ret);
+		return ret;
+	}
+
+	ret = regulator_set_voltage(drv->vreg_mx, uv, INT_MAX);
+	if (ret) {
+		dev_err(pil->dev, "Failed to request vreg_mx voltage(rc:%d)\n",
+									ret);
+		return ret;
+	}
+
+	ret = regulator_enable(drv->vreg_mx);
+	if (ret) {
+		dev_err(pil->dev, "Failed to enable vreg_mx(rc:%d)\n", ret);
+		regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
+		return ret;
+	}
+
+	ret = pil_q6v5_make_proxy_votes(pil);
+	if (ret) {
+		regulator_disable(drv->vreg_mx);
+		regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
+	}
+
+	return ret;
+}
+
+void pil_mss_remove_proxy_votes(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+	pil_q6v5_remove_proxy_votes(pil);
+	regulator_disable(drv->vreg_mx);
+	regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
+}
+
+static int pil_mss_mem_setup(struct pil_desc *pil,
+					phys_addr_t addr, size_t size)
+{
+	struct modem_data *md = dev_get_drvdata(pil->dev);
+
+	struct pas_init_image_req {
+		u32	proc;
+		u32	start_addr;
+		u32	len;
+	} request;
+	u32 scm_ret = 0;
+	int ret;
+	struct scm_desc desc = {0};
+
+	if (!md->subsys_desc.pil_mss_memsetup)
+		return 0;
+
+	request.proc = md->pas_id;
+	request.start_addr = addr;
+	request.len = size;
+
+	if (!is_scm_armv8()) {
+		ret = scm_call(SCM_SVC_PIL, PAS_MEM_SETUP_CMD, &request,
+				sizeof(request), &scm_ret, sizeof(scm_ret));
+	} else {
+		desc.args[0] = md->pas_id;
+		desc.args[1] = addr;
+		desc.args[2] = size;
+		desc.arginfo = SCM_ARGS(3);
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL, PAS_MEM_SETUP_CMD),
+				&desc);
+		scm_ret = desc.ret[0];
+	}
+	if (ret)
+		return ret;
+	return scm_ret;
+}
+
+static int pil_mss_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	phys_addr_t start_addr = pil_get_entry_addr(pil);
+	int ret;
+
+	if (drv->mba_dp_phys)
+		start_addr = drv->mba_dp_phys;
+
+	/*
+	 * Bring subsystem out of reset and enable required
+	 * regulators and clocks.
+	 */
+	ret = pil_mss_power_up(drv);
+	if (ret)
+		goto err_power;
+
+	/* Deassert reset to subsystem and wait for propagation */
+	ret = pil_mss_restart_reg(drv, 0);
+	if (ret)
+		goto err_restart;
+
+	ret = pil_mss_enable_clks(drv);
+	if (ret)
+		goto err_clks;
+
+	if (modem_dbg_cfg)
+		writel_relaxed(modem_dbg_cfg, drv->reg_base + QDSP6SS_DBG_CFG);
+
+	/* Program Image Address */
+	if (drv->self_auth) {
+		writel_relaxed(start_addr, drv->rmb_base + RMB_MBA_IMAGE);
+		/*
+		 * Ensure write to RMB base occurs before reset
+		 * is released.
+		 */
+		mb();
+	} else {
+		writel_relaxed((start_addr >> 4) & 0x0FFFFFF0,
+				drv->reg_base + QDSP6SS_RST_EVB);
+	}
+
+	/* Program DP Address */
+	if (drv->dp_size) {
+		writel_relaxed(start_addr + SZ_1M, drv->rmb_base +
+			       RMB_PMI_CODE_START);
+		writel_relaxed(drv->dp_size, drv->rmb_base +
+			       RMB_PMI_CODE_LENGTH);
+	} else {
+		writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_START);
+		writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);
+	}
+	/* Make sure RMB regs are written before bringing modem out of reset */
+	mb();
+
+	ret = pil_q6v5_reset(pil);
+	if (ret)
+		goto err_q6v5_reset;
+
+	/* Wait for MBA to start. Check for PBL and MBA errors while waiting. */
+	if (drv->self_auth) {
+		ret = pil_msa_wait_for_mba_ready(drv);
+		if (ret)
+			goto err_q6v5_reset;
+	}
+
+	dev_info(pil->dev, "MBA boot done\n");
+	drv->is_booted = true;
+
+	return 0;
+
+err_q6v5_reset:
+	modem_log_rmb_regs(drv->rmb_base);
+	pil_mss_disable_clks(drv);
+	if (drv->ahb_clk_vote)
+		clk_disable_unprepare(drv->ahb_clk);
+err_clks:
+	pil_mss_restart_reg(drv, 1);
+err_restart:
+	pil_mss_power_down(drv);
+err_power:
+	return ret;
+}
+
+int pil_mss_reset_load_mba(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	struct modem_data *md = dev_get_drvdata(pil->dev);
+	const struct firmware *fw, *dp_fw = NULL;
+	char fw_name_legacy[10] = "mba.b00";
+	char fw_name[10] = "mba.mbn";
+	char *dp_name = "msadp";
+	char *fw_name_p;
+	void *mba_dp_virt;
+	dma_addr_t mba_dp_phys, mba_dp_phys_end;
+	int ret, count;
+	const u8 *data;
+
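+	/* Pre-ELF ("legacy") MBA images ship as mba.b00; ELF images as mba.mbn */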
+	fw_name_p = drv->non_elf_image ? fw_name_legacy : fw_name;
+	ret = request_firmware(&fw, fw_name_p, pil->dev);
+	if (ret) {
+		dev_err(pil->dev, "Failed to locate %s (rc:%d)\n",
+						fw_name_p, ret);
+		return ret;
+	}
+
+	data = fw ? fw->data : NULL;
+	if (!data) {
+		dev_err(pil->dev, "MBA data is NULL\n");
+		ret = -ENOMEM;
+		goto err_invalid_fw;
+	}
+
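+	/*
+	 * The MBA image occupies the first 1MB of the buffer; the debug
+	 * policy (DP) blob, if present, is appended immediately after it
+	 * (see the RMB_PMI_CODE_START programming in pil_mss_reset()).
+	 */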
+	drv->mba_dp_size = SZ_1M;
+
+	arch_setup_dma_ops(&md->mba_mem_dev, 0, 0, NULL, 0);
+
+	md->mba_mem_dev.coherent_dma_mask =
+		DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+	md->attrs_dma = 0;
+	md->attrs_dma |= DMA_ATTR_STRONGLY_ORDERED;
+
+	ret = request_firmware(&dp_fw, dp_name, pil->dev);
+	if (ret) {
+		dev_warn(pil->dev, "Debug policy not present - %s. Continue.\n",
+						dp_name);
+	} else {
+		if (!dp_fw || !dp_fw->data) {
+			dev_err(pil->dev, "Invalid DP firmware\n");
+			ret = -ENOMEM;
+			goto err_invalid_fw;
+		}
+		drv->dp_size = dp_fw->size;
+		drv->mba_dp_size += drv->dp_size;
+	}
+
+	mba_dp_virt = dma_alloc_attrs(&md->mba_mem_dev, drv->mba_dp_size,
+			&mba_dp_phys, GFP_KERNEL, md->attrs_dma);
+	if (!mba_dp_virt) {
+		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
+		ret = -ENOMEM;
+		goto err_invalid_fw;
+	}
+
+	/* Make sure there are no mappings in PKMAP and fixmap */
+	kmap_flush_unused();
+	kmap_atomic_flush_unused();
+
+	drv->mba_dp_phys = mba_dp_phys;
+	drv->mba_dp_virt = mba_dp_virt;
+	mba_dp_phys_end = mba_dp_phys + drv->mba_dp_size;
+
+	dev_info(pil->dev, "Loading MBA and DP (if present) from %pa to %pa\n",
+					&mba_dp_phys, &mba_dp_phys_end);
+
+	/* Load the MBA image into memory */
+	count = fw->size;
+	memcpy(mba_dp_virt, data, count);
+	/* Ensure memcpy of the MBA memory is done before loading the DP */
+	wmb();
+
+	/* Load the DP image into memory */
+	if (drv->mba_dp_size > SZ_1M) {
+		memcpy(mba_dp_virt + SZ_1M, dp_fw->data, dp_fw->size);
+		/* Ensure memcpy is done before powering up modem */
+		wmb();
+	}
+
+	if (pil->subsys_vmid > 0) {
+		ret = pil_assign_mem_to_subsys(pil, drv->mba_dp_phys,
+							drv->mba_dp_size);
+		if (ret) {
+			pr_err("scm_call to unprotect MBA and DP mem failed(rc:%d)\n",
+									ret);
+			goto err_mba_data;
+		}
+	}
+
+	ret = pil_mss_reset(pil);
+	if (ret) {
+		dev_err(pil->dev, "MBA boot failed(rc:%d)\n", ret);
+		goto err_mss_reset;
+	}
+
+	if (dp_fw)
+		release_firmware(dp_fw);
+	release_firmware(fw);
+
+	return 0;
+
+err_mss_reset:
+	if (pil->subsys_vmid > 0)
+		pil_assign_mem_to_linux(pil, drv->mba_dp_phys,
+							drv->mba_dp_size);
+err_mba_data:
+	dma_free_attrs(&md->mba_mem_dev, drv->mba_dp_size, drv->mba_dp_virt,
+				drv->mba_dp_phys, md->attrs_dma);
+err_invalid_fw:
+	if (dp_fw)
+		release_firmware(dp_fw);
+	release_firmware(fw);
+	drv->mba_dp_virt = NULL;
+	return ret;
+}
+
+static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
+					size_t size)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	void *mdata_virt;
+	dma_addr_t mdata_phys;
+	s32 status;
+	int ret;
+	u64 val = is_timeout_disabled() ? 0 : modem_auth_timeout_ms * 1000;
+	unsigned long attrs = 0;
+
+	drv->mba_mem_dev.coherent_dma_mask =
+		DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+	attrs |= DMA_ATTR_STRONGLY_ORDERED;
+	/* Make metadata physically contiguous and 4K aligned. */
+	mdata_virt = dma_alloc_attrs(&drv->mba_mem_dev, size, &mdata_phys,
+					GFP_KERNEL, attrs);
+	if (!mdata_virt) {
+		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+	memcpy(mdata_virt, metadata, size);
+	/* wmb() ensures copy completes prior to starting authentication. */
+	wmb();
+
+	if (pil->subsys_vmid > 0) {
+		ret = pil_assign_mem_to_subsys(pil, mdata_phys,
+							ALIGN(size, SZ_4K));
+		if (ret) {
+			pr_err("scm_call to unprotect modem metadata mem failed(rc:%d)\n",
+									ret);
+			dma_free_attrs(&drv->mba_mem_dev, size, mdata_virt,
+							mdata_phys, attrs);
+			goto fail;
+		}
+	}
+
+	/* Initialize length counter to 0 */
+	writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);
+
+	/* Pass address of meta-data to the MBA and perform authentication */
+	writel_relaxed(mdata_phys, drv->rmb_base + RMB_PMI_META_DATA);
+	writel_relaxed(CMD_META_DATA_READY, drv->rmb_base + RMB_MBA_COMMAND);
+	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+			status == STATUS_META_DATA_AUTH_SUCCESS || status < 0,
+			POLL_INTERVAL_US, val);
+	if (ret) {
+		dev_err(pil->dev, "MBA authentication of headers timed out(rc:%d)\n",
+								ret);
+	} else if (status < 0) {
+		dev_err(pil->dev, "MBA returned error %d for headers\n",
+				status);
+		ret = -EINVAL;
+	}
+
+	if (pil->subsys_vmid > 0)
+		pil_assign_mem_to_linux(pil, mdata_phys, ALIGN(size, SZ_4K));
+
+	dma_free_attrs(&drv->mba_mem_dev, size, mdata_virt, mdata_phys, attrs);
+
+	if (!ret)
+		return ret;
+
+fail:
+	modem_log_rmb_regs(drv->rmb_base);
+	if (drv->q6) {
+		pil_mss_shutdown(pil);
+		if (pil->subsys_vmid > 0)
+			pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
+						drv->q6->mba_dp_size);
+		dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_dp_size,
+				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
+				drv->attrs_dma);
+		drv->q6->mba_dp_virt = NULL;
+
+	}
+	return ret;
+}
+
+static int pil_msa_mss_reset_mba_load_auth_mdt(struct pil_desc *pil,
+				  const u8 *metadata, size_t size)
+{
+	int ret;
+
+	ret = pil_mss_reset_load_mba(pil);
+	if (ret)
+		return ret;
+
+	return pil_msa_auth_modem_mdt(pil, metadata, size);
+}
+
+static int pil_msa_mba_verify_blob(struct pil_desc *pil, phys_addr_t phy_addr,
+				   size_t size)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	s32 status;
+	u32 img_length = readl_relaxed(drv->rmb_base + RMB_PMI_CODE_LENGTH);
+
+	/* Begin image authentication */
+	if (img_length == 0) {
+		writel_relaxed(phy_addr, drv->rmb_base + RMB_PMI_CODE_START);
+		writel_relaxed(CMD_LOAD_READY, drv->rmb_base + RMB_MBA_COMMAND);
+	}
+	/* Increment length counter */
+	img_length += size;
+	writel_relaxed(img_length, drv->rmb_base + RMB_PMI_CODE_LENGTH);
+
+	status = readl_relaxed(drv->rmb_base + RMB_MBA_STATUS);
+	if (status < 0) {
+		dev_err(pil->dev, "MBA returned error %d\n", status);
+		modem_log_rmb_regs(drv->rmb_base);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int pil_msa_mba_auth(struct pil_desc *pil)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
+	int ret;
+	s32 status;
+	u64 val = is_timeout_disabled() ? 0 : modem_auth_timeout_ms * 1000;
+
+	/* Wait for all segments to be authenticated or an error to occur */
+	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+		status == STATUS_AUTH_COMPLETE || status < 0, 50, val);
+	if (ret) {
+		dev_err(pil->dev, "MBA authentication of image timed out(rc:%d)\n",
+									ret);
+	} else if (status < 0) {
+		dev_err(pil->dev, "MBA returned error %d for image\n", status);
+		ret = -EINVAL;
+	}
+
+	if (drv->q6) {
+		if (drv->q6->mba_dp_virt) {
+			/* Reclaim MBA and DP (if allocated) memory. */
+			if (pil->subsys_vmid > 0)
+				pil_assign_mem_to_linux(pil,
+					drv->q6->mba_dp_phys,
+					drv->q6->mba_dp_size);
+			dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_dp_size,
+					drv->q6->mba_dp_virt,
+					drv->q6->mba_dp_phys, drv->attrs_dma);
+
+			drv->q6->mba_dp_virt = NULL;
+		}
+	}
+	if (ret)
+		modem_log_rmb_regs(drv->rmb_base);
+	if (q6_drv->ahb_clk_vote)
+		clk_disable_unprepare(q6_drv->ahb_clk);
+
+	return ret;
+}
+
+/*
+ * To be used only if self-auth is disabled, or if the
+ * MBA image is loaded as segments and not in init_image.
+ */
+struct pil_reset_ops pil_msa_mss_ops = {
+	.proxy_vote = pil_mss_make_proxy_votes,
+	.proxy_unvote = pil_mss_remove_proxy_votes,
+	.auth_and_reset = pil_mss_reset,
+	.shutdown = pil_mss_shutdown,
+};
+
+/*
+ * To be used if self-auth is enabled and the MBA is to be loaded
+ * in init_image and the modem headers are also to be authenticated
+ * in init_image. Modem segments authenticated in auth_and_reset.
+ */
+struct pil_reset_ops pil_msa_mss_ops_selfauth = {
+	.init_image = pil_msa_mss_reset_mba_load_auth_mdt,
+	.proxy_vote = pil_mss_make_proxy_votes,
+	.proxy_unvote = pil_mss_remove_proxy_votes,
+	.mem_setup = pil_mss_mem_setup,
+	.verify_blob = pil_msa_mba_verify_blob,
+	.auth_and_reset = pil_msa_mba_auth,
+	.deinit_image = pil_mss_deinit_image,
+	.shutdown = pil_mss_shutdown,
+};
+
+/*
+ * To be used if the modem headers are to be authenticated
+ * in init_image, and the modem segments in auth_and_reset.
+ */
+struct pil_reset_ops pil_msa_femto_mba_ops = {
+	.init_image = pil_msa_auth_modem_mdt,
+	.verify_blob = pil_msa_mba_verify_blob,
+	.auth_and_reset = pil_msa_mba_auth,
+};
diff --git a/drivers/soc/qcom/pil-msa.h b/drivers/soc/qcom/pil-msa.h
new file mode 100644
index 0000000..3af6368
--- /dev/null
+++ b/drivers/soc/qcom/pil-msa.h
@@ -0,0 +1,48 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_PIL_MSA_H
+#define __MSM_PIL_MSA_H
+
+#include <soc/qcom/subsystem_restart.h>
+
+#include "peripheral-loader.h"
+
+#define VDD_MSS_UV	1000000
+
+struct modem_data {
+	struct q6v5_data *q6;
+	struct subsys_device *subsys;
+	struct subsys_desc subsys_desc;
+	void *ramdump_dev;
+	bool crash_shutdown;
+	u32 pas_id;
+	bool ignore_errors;
+	struct completion stop_ack;
+	void __iomem *rmb_base;
+	struct clk *xo;
+	struct pil_desc desc;
+	struct device mba_mem_dev;
+	unsigned long attrs_dma;
+};
+
+extern struct pil_reset_ops pil_msa_mss_ops;
+extern struct pil_reset_ops pil_msa_mss_ops_selfauth;
+extern struct pil_reset_ops pil_msa_femto_mba_ops;
+
+int pil_mss_reset_load_mba(struct pil_desc *pil);
+int pil_mss_make_proxy_votes(struct pil_desc *pil);
+void pil_mss_remove_proxy_votes(struct pil_desc *pil);
+int pil_mss_shutdown(struct pil_desc *pil);
+int pil_mss_deinit_image(struct pil_desc *pil);
+int __pil_mss_deinit_image(struct pil_desc *pil, bool err_path);
+#endif
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
new file mode 100644
index 0000000..9308b8d
--- /dev/null
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -0,0 +1,425 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_gpio.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/ramdump.h>
+#include <soc/qcom/smem.h>
+
+#include "peripheral-loader.h"
+#include "pil-q6v5.h"
+#include "pil-msa.h"
+
+#define MAX_VDD_MSS_UV		1150000
+#define PROXY_TIMEOUT_MS	10000
+#define MAX_SSR_REASON_LEN	81U
+#define STOP_ACK_TIMEOUT_MS	1000
+
+#define subsys_to_drv(d) container_of(d, struct modem_data, subsys_desc)
+
+static void log_modem_sfr(void)
+{
+	u32 size;
+	char *smem_reason, reason[MAX_SSR_REASON_LEN];
+
+	smem_reason = smem_get_entry_no_rlock(SMEM_SSR_REASON_MSS0, &size, 0,
+							SMEM_ANY_HOST_FLAG);
+	if (!smem_reason || !size) {
+		pr_err("modem subsystem failure reason: (unknown, smem_get_entry_no_rlock failed).\n");
+		return;
+	}
+	if (!smem_reason[0]) {
+		pr_err("modem subsystem failure reason: (unknown, empty string found).\n");
+		return;
+	}
+
+	strlcpy(reason, smem_reason, min(size, MAX_SSR_REASON_LEN));
+	pr_err("modem subsystem failure reason: %s.\n", reason);
+
+	smem_reason[0] = '\0';
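+	/* Ensure the cleared reason is visible before a subsequent failure */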
+	wmb();
+}
+
+static void restart_modem(struct modem_data *drv)
+{
+	log_modem_sfr();
+	drv->ignore_errors = true;
+	subsystem_restart_dev(drv->subsys);
+}
+
+static irqreturn_t modem_err_fatal_intr_handler(int irq, void *dev_id)
+{
+	struct modem_data *drv = subsys_to_drv(dev_id);
+
+	/* Ignore if we're the one that set the force stop GPIO */
+	if (drv->crash_shutdown)
+		return IRQ_HANDLED;
+
+	pr_err("Fatal error on the modem.\n");
+	subsys_set_crash_status(drv->subsys, true);
+	restart_modem(drv);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t modem_stop_ack_intr_handler(int irq, void *dev_id)
+{
+	struct modem_data *drv = subsys_to_drv(dev_id);
+
+	pr_info("Received stop ack interrupt from modem\n");
+	complete(&drv->stop_ack);
+	return IRQ_HANDLED;
+}
+
+static int modem_shutdown(const struct subsys_desc *subsys, bool force_stop)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+	unsigned long ret;
+
+	if (subsys->is_not_loadable)
+		return 0;
+
+	if (!subsys_get_crash_status(drv->subsys) && force_stop &&
+	    subsys->force_stop_gpio) {
+		gpio_set_value(subsys->force_stop_gpio, 1);
+		ret = wait_for_completion_timeout(&drv->stop_ack,
+				msecs_to_jiffies(STOP_ACK_TIMEOUT_MS));
+		if (!ret)
+			pr_warn("Timed out on stop ack from modem.\n");
+		gpio_set_value(subsys->force_stop_gpio, 0);
+	}
+
+	if (drv->subsys_desc.ramdump_disable_gpio) {
+		drv->subsys_desc.ramdump_disable = gpio_get_value(
+					drv->subsys_desc.ramdump_disable_gpio);
+		pr_warn("Ramdump disable gpio value is %d\n",
+			drv->subsys_desc.ramdump_disable);
+	}
+
+	pil_shutdown(&drv->q6->desc);
+
+	return 0;
+}
+
+static int modem_powerup(const struct subsys_desc *subsys)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+
+	if (subsys->is_not_loadable)
+		return 0;
+	/*
+	 * At this time, the modem is shutdown. Therefore this function cannot
+	 * run concurrently with the watchdog bite error handler, making it safe
+	 * to unset the flag below.
+	 */
+	reinit_completion(&drv->stop_ack);
+	drv->subsys_desc.ramdump_disable = 0;
+	drv->ignore_errors = false;
+	drv->q6->desc.fw_name = subsys->fw_name;
+	return pil_boot(&drv->q6->desc);
+}
+
+static void modem_crash_shutdown(const struct subsys_desc *subsys)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+
+	drv->crash_shutdown = true;
+	if (!subsys_get_crash_status(drv->subsys) &&
+		subsys->force_stop_gpio) {
+		gpio_set_value(subsys->force_stop_gpio, 1);
+		mdelay(STOP_ACK_TIMEOUT_MS);
+	}
+}
+
+static int modem_ramdump(int enable, const struct subsys_desc *subsys)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+	int ret;
+
+	if (!enable)
+		return 0;
+
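+	/*
+	 * Collecting a modem ramdump requires the MBA to be running, so take
+	 * proxy votes and reload it; everything is released again once the
+	 * dump completes.
+	 */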
+	ret = pil_mss_make_proxy_votes(&drv->q6->desc);
+	if (ret)
+		return ret;
+
+	ret = pil_mss_reset_load_mba(&drv->q6->desc);
+	if (ret) {
+		pil_mss_remove_proxy_votes(&drv->q6->desc);
+		return ret;
+	}
+
+	ret = pil_do_ramdump(&drv->q6->desc, drv->ramdump_dev);
+	if (ret < 0)
+		pr_err("Unable to dump modem fw memory (rc = %d).\n", ret);
+
+	ret = __pil_mss_deinit_image(&drv->q6->desc, false);
+	if (ret < 0)
+		pr_err("Unable to free up resources (rc = %d).\n", ret);
+
+	pil_mss_remove_proxy_votes(&drv->q6->desc);
+	return ret;
+}
+
+static irqreturn_t modem_wdog_bite_intr_handler(int irq, void *dev_id)
+{
+	struct modem_data *drv = subsys_to_drv(dev_id);
+
+	if (drv->ignore_errors)
+		return IRQ_HANDLED;
+
+	pr_err("Watchdog bite received from modem software!\n");
+	if (drv->subsys_desc.system_debug &&
+			!gpio_get_value(drv->subsys_desc.err_fatal_gpio))
+		panic("%s: System ramdump requested. Triggering device restart!\n",
+							__func__);
+	subsys_set_crash_status(drv->subsys, true);
+	restart_modem(drv);
+	return IRQ_HANDLED;
+}
+
+static int pil_subsys_init(struct modem_data *drv,
+					struct platform_device *pdev)
+{
+	int ret;
+
+	drv->subsys_desc.name = "modem";
+	drv->subsys_desc.dev = &pdev->dev;
+	drv->subsys_desc.owner = THIS_MODULE;
+	drv->subsys_desc.shutdown = modem_shutdown;
+	drv->subsys_desc.powerup = modem_powerup;
+	drv->subsys_desc.ramdump = modem_ramdump;
+	drv->subsys_desc.crash_shutdown = modem_crash_shutdown;
+	drv->subsys_desc.err_fatal_handler = modem_err_fatal_intr_handler;
+	drv->subsys_desc.stop_ack_handler = modem_stop_ack_intr_handler;
+	drv->subsys_desc.wdog_bite_handler = modem_wdog_bite_intr_handler;
+
+	drv->q6->desc.modem_ssr = false;
+	drv->subsys = subsys_register(&drv->subsys_desc);
+	if (IS_ERR(drv->subsys)) {
+		ret = PTR_ERR(drv->subsys);
+		goto err_subsys;
+	}
+
+	drv->ramdump_dev = create_ramdump_device("modem", &pdev->dev);
+	if (!drv->ramdump_dev) {
+		pr_err("%s: Unable to create a modem ramdump device.\n",
+			__func__);
+		ret = -ENOMEM;
+		goto err_ramdump;
+	}
+
+	return 0;
+
+err_ramdump:
+	subsys_unregister(drv->subsys);
+err_subsys:
+	return ret;
+}
+
+static int pil_mss_loadable_init(struct modem_data *drv,
+					struct platform_device *pdev)
+{
+	struct q6v5_data *q6;
+	struct pil_desc *q6_desc;
+	struct resource *res;
+	struct property *prop;
+	int ret;
+
+	q6 = pil_q6v5_init(pdev);
+	if (IS_ERR_OR_NULL(q6))
+		return q6 ? PTR_ERR(q6) : -ENODEV;
+	drv->q6 = q6;
+	drv->xo = q6->xo;
+
+	q6_desc = &q6->desc;
+	q6_desc->owner = THIS_MODULE;
+	q6_desc->proxy_timeout = PROXY_TIMEOUT_MS;
+
+	q6_desc->ops = &pil_msa_mss_ops;
+
+	q6->self_auth = of_property_read_bool(pdev->dev.of_node,
+							"qcom,pil-self-auth");
+	if (q6->self_auth) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						    "rmb_base");
+		q6->rmb_base = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(q6->rmb_base))
+			return PTR_ERR(q6->rmb_base);
+		drv->rmb_base = q6->rmb_base;
+		q6_desc->ops = &pil_msa_mss_ops_selfauth;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "restart_reg");
+	if (!res) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"restart_reg_sec");
+		if (!res)
+			return -ENODEV;
+		q6->restart_reg_sec = true;
+	}
+
+	q6->restart_reg = devm_ioremap(&pdev->dev,
+						res->start, resource_size(res));
+	if (!q6->restart_reg)
+		return -ENOMEM;
+
+	q6->vreg = NULL;
+
+	prop = of_find_property(pdev->dev.of_node, "vdd_mss-supply", NULL);
+	if (prop) {
+		q6->vreg = devm_regulator_get(&pdev->dev, "vdd_mss");
+		if (IS_ERR(q6->vreg))
+			return PTR_ERR(q6->vreg);
+
+		ret = regulator_set_voltage(q6->vreg, VDD_MSS_UV,
+						MAX_VDD_MSS_UV);
+		if (ret)
+			dev_err(&pdev->dev, "Failed to set vreg voltage(rc:%d)\n",
+									ret);
+
+		ret = regulator_set_load(q6->vreg, 100000);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "Failed to set vreg mode(rc:%d)\n",
+									ret);
+			return ret;
+		}
+	}
+
+	q6->vreg_mx = devm_regulator_get(&pdev->dev, "vdd_mx");
+	if (IS_ERR(q6->vreg_mx))
+		return PTR_ERR(q6->vreg_mx);
+	prop = of_find_property(pdev->dev.of_node, "vdd_mx-uV", NULL);
+	if (!prop) {
+		dev_err(&pdev->dev, "Missing vdd_mx-uV property\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+		"cxrail_bhs_reg");
+	if (res)
+		q6->cxrail_bhs = devm_ioremap(&pdev->dev, res->start,
+					  resource_size(res));
+
+	q6->ahb_clk = devm_clk_get(&pdev->dev, "iface_clk");
+	if (IS_ERR(q6->ahb_clk))
+		return PTR_ERR(q6->ahb_clk);
+
+	q6->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
+	if (IS_ERR(q6->axi_clk))
+		return PTR_ERR(q6->axi_clk);
+
+	q6->rom_clk = devm_clk_get(&pdev->dev, "mem_clk");
+	if (IS_ERR(q6->rom_clk))
+		return PTR_ERR(q6->rom_clk);
+
+	ret = of_property_read_u32(pdev->dev.of_node,
+					"qcom,pas-id", &drv->pas_id);
+	if (ret)
+		dev_info(&pdev->dev, "No pas_id found.\n");
+
+	drv->subsys_desc.pil_mss_memsetup =
+	of_property_read_bool(pdev->dev.of_node, "qcom,pil-mss-memsetup");
+
+	/* Optional. */
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,active-clock-names", "gpll0_mss_clk") >= 0)
+		q6->gpll0_mss_clk = devm_clk_get(&pdev->dev, "gpll0_mss_clk");
+
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,active-clock-names", "snoc_axi_clk") >= 0)
+		q6->snoc_axi_clk = devm_clk_get(&pdev->dev, "snoc_axi_clk");
+
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,active-clock-names", "mnoc_axi_clk") >= 0)
+		q6->mnoc_axi_clk = devm_clk_get(&pdev->dev, "mnoc_axi_clk");
+
+	ret = pil_desc_init(q6_desc);
+
+	return ret;
+}
+
+static int pil_mss_driver_probe(struct platform_device *pdev)
+{
+	struct modem_data *drv;
+	int ret, is_not_loadable;
+
+	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+	if (!drv)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, drv);
+
+	is_not_loadable = of_property_read_bool(pdev->dev.of_node,
+							"qcom,is-not-loadable");
+	if (is_not_loadable) {
+		drv->subsys_desc.is_not_loadable = 1;
+	} else {
+		ret = pil_mss_loadable_init(drv, pdev);
+		if (ret)
+			return ret;
+	}
+	init_completion(&drv->stop_ack);
+
+	return pil_subsys_init(drv, pdev);
+}
+
+static int pil_mss_driver_exit(struct platform_device *pdev)
+{
+	struct modem_data *drv = platform_get_drvdata(pdev);
+
+	subsys_unregister(drv->subsys);
+	destroy_ramdump_device(drv->ramdump_dev);
+	pil_desc_release(&drv->q6->desc);
+	return 0;
+}
+
+static const struct of_device_id mss_match_table[] = {
+	{ .compatible = "qcom,pil-q6v5-mss" },
+	{ .compatible = "qcom,pil-q6v55-mss" },
+	{ .compatible = "qcom,pil-q6v56-mss" },
+	{}
+};
+
+static struct platform_driver pil_mss_driver = {
+	.probe = pil_mss_driver_probe,
+	.remove = pil_mss_driver_exit,
+	.driver = {
+		.name = "pil-q6v5-mss",
+		.of_match_table = mss_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init pil_mss_init(void)
+{
+	return platform_driver_register(&pil_mss_driver);
+}
+module_init(pil_mss_init);
+
+static void __exit pil_mss_exit(void)
+{
+	platform_driver_unregister(&pil_mss_driver);
+}
+module_exit(pil_mss_exit);
+
+MODULE_DESCRIPTION("Support for booting modem subsystems with QDSP6v5 Hexagon processors");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/pil-q6v5.c b/drivers/soc/qcom/pil-q6v5.c
new file mode 100644
index 0000000..9fe204b
--- /dev/null
+++ b/drivers/soc/qcom/pil-q6v5.c
@@ -0,0 +1,742 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include "peripheral-loader.h"
+#include "pil-q6v5.h"
+
+/* QDSP6SS Register Offsets */
+#define QDSP6SS_RESET			0x014
+#define QDSP6SS_GFMUX_CTL		0x020
+#define QDSP6SS_PWR_CTL			0x030
+#define QDSP6V6SS_MEM_PWR_CTL		0x034
+#define QDSP6SS_BHS_STATUS		0x078
+#define QDSP6SS_MEM_PWR_CTL		0x0B0
+#define QDSP6SS_STRAP_ACC		0x110
+#define QDSP6V62SS_BHS_STATUS		0x0C4
+
+/* AXI Halt Register Offsets */
+#define AXI_HALTREQ			0x0
+#define AXI_HALTACK			0x4
+#define AXI_IDLE			0x8
+
+#define HALT_ACK_TIMEOUT_US		100000
+
+/* QDSP6SS_RESET */
+#define Q6SS_STOP_CORE			BIT(0)
+#define Q6SS_CORE_ARES			BIT(1)
+#define Q6SS_BUS_ARES_ENA		BIT(2)
+
+/* QDSP6SS_GFMUX_CTL */
+#define Q6SS_CLK_ENA			BIT(1)
+#define Q6SS_CLK_SRC_SEL_C		BIT(3)
+#define Q6SS_CLK_SRC_SEL_FIELD		0xC
+#define Q6SS_CLK_SRC_SWITCH_CLK_OVR	BIT(8)
+
+/* QDSP6SS_PWR_CTL */
+#define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
+#define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
+#define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
+#define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
+#define Q6SS_ETB_SLP_NRET_N		BIT(17)
+#define Q6SS_L2DATA_STBY_N		BIT(18)
+#define Q6SS_SLP_RET_N			BIT(19)
+#define Q6SS_CLAMP_IO			BIT(20)
+#define QDSS_BHS_ON			BIT(21)
+#define QDSS_LDO_BYP			BIT(22)
+
+/* QDSP6v55 parameters */
+#define QDSP6v55_LDO_ON                 BIT(26)
+#define QDSP6v55_LDO_BYP                BIT(25)
+#define QDSP6v55_BHS_ON                 BIT(24)
+#define QDSP6v55_CLAMP_WL               BIT(21)
+#define QDSP6v55_CLAMP_QMC_MEM          BIT(22)
+#define L1IU_SLP_NRET_N                 BIT(15)
+#define L1DU_SLP_NRET_N                 BIT(14)
+#define L2PLRU_SLP_NRET_N               BIT(13)
+#define QDSP6v55_BHS_EN_REST_ACK        BIT(0)
+
+#define HALT_CHECK_MAX_LOOPS            (200)
+#define BHS_CHECK_MAX_LOOPS             (200)
+#define QDSP6SS_XO_CBCR                 (0x0038)
+
+#define QDSP6SS_ACC_OVERRIDE_VAL	0x20
+
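+/*
+ * Proxy votes hold the resources the Hexagon only needs while it boots:
+ * the XO, PNoC and QDSS clocks plus the CX rail (and the PLL rail when
+ * present). The error path below unwinds them in reverse order.
+ */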
+int pil_q6v5_make_proxy_votes(struct pil_desc *pil)
+{
+	int ret;
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	int uv;
+
+	ret = of_property_read_u32(pil->dev->of_node, "vdd_cx-voltage", &uv);
+	if (ret) {
+		dev_err(pil->dev, "missing vdd_cx-voltage property(rc:%d)\n",
+								ret);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(drv->xo);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for XO(rc:%d)\n", ret);
+		goto out;
+	}
+
+	ret = clk_prepare_enable(drv->pnoc_clk);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for pnoc(rc:%d)\n", ret);
+		goto err_pnoc_vote;
+	}
+
+	ret = clk_prepare_enable(drv->qdss_clk);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for qdss(rc:%d)\n", ret);
+		goto err_qdss_vote;
+	}
+
+	ret = regulator_set_voltage(drv->vreg_cx, uv, INT_MAX);
+	if (ret) {
+		dev_err(pil->dev, "Failed to request vdd_cx voltage(rc:%d)\n",
+								ret);
+		goto err_cx_voltage;
+	}
+
+	ret = regulator_set_load(drv->vreg_cx, 100000);
+	if (ret < 0) {
+		dev_err(pil->dev, "Failed to set vdd_cx mode(rc:%d)\n", ret);
+		goto err_cx_mode;
+	}
+
+	ret = regulator_enable(drv->vreg_cx);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for vdd_cx(rc:%d)\n", ret);
+		goto err_cx_enable;
+	}
+
+	if (drv->vreg_pll) {
+		ret = regulator_enable(drv->vreg_pll);
+		if (ret) {
+			dev_err(pil->dev, "Failed to vote for vdd_pll(rc:%d)\n",
+									ret);
+			goto err_vreg_pll;
+		}
+	}
+
+	return 0;
+
+err_vreg_pll:
+	regulator_disable(drv->vreg_cx);
+err_cx_enable:
+	regulator_set_load(drv->vreg_cx, 0);
+err_cx_mode:
+	regulator_set_voltage(drv->vreg_cx, 0, uv);
+err_cx_voltage:
+	clk_disable_unprepare(drv->qdss_clk);
+err_qdss_vote:
+	clk_disable_unprepare(drv->pnoc_clk);
+err_pnoc_vote:
+	clk_disable_unprepare(drv->xo);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(pil_q6v5_make_proxy_votes);
+
+void pil_q6v5_remove_proxy_votes(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	int uv, ret = 0;
+
+	ret = of_property_read_u32(pil->dev->of_node, "vdd_cx-voltage", &uv);
+	if (ret) {
+		dev_err(pil->dev, "missing vdd_cx-voltage property(rc:%d)\n",
+									ret);
+		return;
+	}
+
+	if (drv->vreg_pll) {
+		regulator_disable(drv->vreg_pll);
+		regulator_set_load(drv->vreg_pll, 0);
+	}
+	regulator_disable(drv->vreg_cx);
+	regulator_set_load(drv->vreg_cx, 0);
+	regulator_set_voltage(drv->vreg_cx, 0, uv);
+	clk_disable_unprepare(drv->xo);
+	clk_disable_unprepare(drv->pnoc_clk);
+	clk_disable_unprepare(drv->qdss_clk);
+}
+EXPORT_SYMBOL(pil_q6v5_remove_proxy_votes);
+
+void pil_q6v5_halt_axi_port(struct pil_desc *pil, void __iomem *halt_base)
+{
+	int ret;
+	u32 status;
+
+	/* Assert halt request */
+	writel_relaxed(1, halt_base + AXI_HALTREQ);
+
+	/* Wait for halt */
+	ret = readl_poll_timeout(halt_base + AXI_HALTACK,
+		status, status != 0, 50, HALT_ACK_TIMEOUT_US);
+	if (ret)
+		dev_warn(pil->dev, "Port %p halt timeout\n", halt_base);
+	else if (!readl_relaxed(halt_base + AXI_IDLE))
+		dev_warn(pil->dev, "Port %p halt failed\n", halt_base);
+
+	/* Clear halt request (port will remain halted until reset) */
+	writel_relaxed(0, halt_base + AXI_HALTREQ);
+}
+EXPORT_SYMBOL(pil_q6v5_halt_axi_port);
+
+void assert_clamps(struct pil_desc *pil)
+{
+	u32 val;
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+	/*
+	 * Assert QDSP6 I/O clamp, memory wordline clamp, and compiler memory
+	 * clamp as a software workaround to avoid high MX current during
+	 * LPASS/MSS restart.
+	 */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= (Q6SS_CLAMP_IO | QDSP6v55_CLAMP_WL |
+			QDSP6v55_CLAMP_QMC_MEM);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	/* Make sure the clamps are asserted before MSS restart */
+	mb();
+}
+
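+/*
+ * Power-down sequence: gate the core clock, clamp the I/Os, drop the Q6
+ * memory retention bits, assert the core and bus resets, and finally cut
+ * power at the block headswitch (BHS).
+ */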
+static void __pil_q6v5_shutdown(struct pil_desc *pil)
+{
+	u32 val;
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+	/* Turn off core clock */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_GFMUX_CTL);
+	val &= ~Q6SS_CLK_ENA;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_GFMUX_CTL);
+
+	/* Clamp IO */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_CLAMP_IO;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Turn off Q6 memories */
+	val &= ~(Q6SS_L2DATA_SLP_NRET_N_0 | Q6SS_L2DATA_SLP_NRET_N_1 |
+		 Q6SS_L2DATA_SLP_NRET_N_2 | Q6SS_SLP_RET_N |
+		 Q6SS_L2TAG_SLP_NRET_N | Q6SS_ETB_SLP_NRET_N |
+		 Q6SS_L2DATA_STBY_N);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Assert Q6 resets */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* Kill power at block headswitch */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val &= ~QDSS_BHS_ON;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+}
+
+void pil_q6v5_shutdown(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+	if (drv->qdsp6v55) {
+		/* Subsystem driver expected to halt bus and assert reset */
+		return;
+	}
+	__pil_q6v5_shutdown(pil);
+}
+EXPORT_SYMBOL(pil_q6v5_shutdown);
+
+static int __pil_q6v5_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	u32 val;
+
+	/* Assert resets, stop core */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA | Q6SS_STOP_CORE);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* Enable power block headswitch, and wait for it to stabilize */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= QDSS_BHS_ON | QDSS_LDO_BYP;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	mb();
+	udelay(1);
+
+	/*
+	 * Turn on memories. L2 banks should be done individually
+	 * to minimize inrush current.
+	 */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
+	       Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_L2DATA_SLP_NRET_N_2;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_L2DATA_SLP_NRET_N_1;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_L2DATA_SLP_NRET_N_0;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Remove IO clamp */
+	val &= ~Q6SS_CLAMP_IO;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Bring core out of reset */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val &= ~Q6SS_CORE_ARES;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* Turn on core clock */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_GFMUX_CTL);
+	val |= Q6SS_CLK_ENA;
+
+	/* Need a different clock source for v5.2.0 */
+	if (drv->qdsp6v5_2_0) {
+		val &= ~Q6SS_CLK_SRC_SEL_FIELD;
+		val |= Q6SS_CLK_SRC_SEL_C;
+	}
+
+	/* force clock on during source switch */
+	if (drv->qdsp6v56)
+		val |= Q6SS_CLK_SRC_SWITCH_CLK_OVR;
+
+	writel_relaxed(val, drv->reg_base + QDSP6SS_GFMUX_CTL);
+
+	/* Start core execution */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val &= ~Q6SS_STOP_CORE;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	return 0;
+}
+
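+/*
+ * Enable the QDSP6SS XO branch clock. Per the usual Qualcomm CBCR layout,
+ * bit 0 is the clock enable and bit 31 is the CLK_OFF status, which
+ * clears once the branch is actually running.
+ */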
+static int q6v55_branch_clk_enable(struct q6v5_data *drv)
+{
+	u32 val, count;
+	void __iomem *cbcr_reg = drv->reg_base + QDSP6SS_XO_CBCR;
+
+	val = readl_relaxed(cbcr_reg);
+	val |= 0x1;
+	writel_relaxed(val, cbcr_reg);
+
+	for (count = HALT_CHECK_MAX_LOOPS; count > 0; count--) {
+		val = readl_relaxed(cbcr_reg);
+		if (!(val & BIT(31)))
+			return 0;
+		udelay(1);
+	}
+
+	dev_err(drv->desc.dev, "Failed to enable xo branch clock.\n");
+	return -EINVAL;
+}
+
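+/*
+ * Boot sequence for QDSP6v55-class cores: optionally override the ACC
+ * strap, assert the resets, enable the XO branch clock and the BHS,
+ * bypass the LDO, power the memories up bank by bank (the exact walk
+ * depends on the core revision), then remove the clamps and release the
+ * core from reset.
+ */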
+static int __pil_q6v55_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	u32 val;
+	int i;
+
+	/* Override the ACC value if required */
+	if (drv->override_acc)
+		writel_relaxed(QDSP6SS_ACC_OVERRIDE_VAL,
+				drv->reg_base + QDSP6SS_STRAP_ACC);
+
+	/* Override the ACC value with input value */
+	if (!of_property_read_u32(pil->dev->of_node, "qcom,override-acc-1",
+				&drv->override_acc_1))
+		writel_relaxed(drv->override_acc_1,
+				drv->reg_base + QDSP6SS_STRAP_ACC);
+
+	/* Assert resets, stop core */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA | Q6SS_STOP_CORE);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* The BHS requires the XO CBCR to be enabled */
+	i = q6v55_branch_clk_enable(drv);
+	if (i)
+		return i;
+
+	/* Enable power block headswitch, and wait for it to stabilize */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= QDSP6v55_BHS_ON;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	mb();
+	udelay(1);
+
+	if (drv->qdsp6v62_1_2 || drv->qdsp6v62_1_5) {
+		for (i = BHS_CHECK_MAX_LOOPS; i > 0; i--) {
+			if (readl_relaxed(drv->reg_base + QDSP6V62SS_BHS_STATUS)
+			    & QDSP6v55_BHS_EN_REST_ACK)
+				break;
+			udelay(1);
+		}
+		if (!i) {
+			pr_err("%s: BHS_EN_REST_ACK not set!\n", __func__);
+			return -ETIMEDOUT;
+		}
+	}
+
+	if (drv->qdsp6v61_1_1) {
+		for (i = BHS_CHECK_MAX_LOOPS; i > 0; i--) {
+			if (readl_relaxed(drv->reg_base + QDSP6SS_BHS_STATUS)
+			    & QDSP6v55_BHS_EN_REST_ACK)
+				break;
+			udelay(1);
+		}
+		if (!i) {
+			pr_err("%s: BHS_EN_REST_ACK not set!\n", __func__);
+			return -ETIMEDOUT;
+		}
+	}
+
+	/* Put LDO in bypass mode */
+	val |= QDSP6v55_LDO_BYP;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	if (drv->qdsp6v56_1_3) {
+		/* Deassert memory peripheral sleep and L2 memory standby */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L1, L2 and ETB memories 1 at a time */
+		for (i = 17; i >= 0; i--) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+			udelay(1);
+		}
+	} else if (drv->qdsp6v56_1_5 || drv->qdsp6v56_1_8
+					|| drv->qdsp6v56_1_10) {
+		/* Deassert QDSP6 compiler memory clamp */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val &= ~QDSP6v55_CLAMP_QMC_MEM;
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Deassert memory peripheral sleep and L2 memory standby */
+		val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L1, L2, ETB and JU memories 1 at a time */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_MEM_PWR_CTL);
+		for (i = 19; i >= 0; i--) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base +
+						QDSP6SS_MEM_PWR_CTL);
+			val |= readl_relaxed(drv->reg_base +
+						QDSP6SS_MEM_PWR_CTL);
+			/*
+			 * Wait for 1us for both memory peripheral and
+			 * data array to turn on.
+			 */
+			udelay(1);
+		}
+	} else if (drv->qdsp6v56_1_8_inrush_current) {
+		/* Deassert QDSP6 compiler memory clamp */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val &= ~QDSP6v55_CLAMP_QMC_MEM;
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Deassert memory peripheral sleep and L2 memory standby */
+		val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L1, L2, ETB and JU memories 1 at a time */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_MEM_PWR_CTL);
+		for (i = 19; i >= 6; i--) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base +
+						QDSP6SS_MEM_PWR_CTL);
+			/*
+			 * Wait for 1us for both memory peripheral and
+			 * data array to turn on.
+			 */
+			udelay(1);
+		}
+
+		for (i = 0 ; i <= 5 ; i++) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base +
+						QDSP6SS_MEM_PWR_CTL);
+			/*
+			 * Wait for 1us for both memory peripheral and
+			 * data array to turn on.
+			 */
+			udelay(1);
+		}
+	} else if (drv->qdsp6v61_1_1 || drv->qdsp6v62_1_2 ||
+						drv->qdsp6v62_1_5) {
+		/* Deassert QDSP6 compiler memory clamp */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val &= ~QDSP6v55_CLAMP_QMC_MEM;
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Deassert memory peripheral sleep and L2 memory standby */
+		val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L1, L2, ETB and JU memories 1 at a time */
+		val = readl_relaxed(drv->reg_base +
+				QDSP6V6SS_MEM_PWR_CTL);
+
+		if (drv->qdsp6v62_1_5)
+			i = 29;
+		else
+			i = 28;
+
+		for ( ; i >= 0; i--) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base +
+					QDSP6V6SS_MEM_PWR_CTL);
+			/*
+			 * Wait for 1us for both memory peripheral and
+			 * data array to turn on.
+			 */
+			udelay(1);
+		}
+	} else {
+		/* Turn on memories. */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val |= 0xFFF00;
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L2 banks 1 at a time */
+		for (i = 0; i <= 7; i++) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+		}
+	}
+
+	/* Remove word line clamp */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val &= ~QDSP6v55_CLAMP_WL;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Remove IO clamp */
+	val &= ~Q6SS_CLAMP_IO;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Bring core out of reset */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val &= ~(Q6SS_CORE_ARES | Q6SS_STOP_CORE);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* Turn on core clock */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_GFMUX_CTL);
+	val |= Q6SS_CLK_ENA;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_GFMUX_CTL);
+
+	return 0;
+}
+
+int pil_q6v5_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+	if (drv->qdsp6v55)
+		return __pil_q6v55_reset(pil);
+	else
+		return __pil_q6v5_reset(pil);
+}
+EXPORT_SYMBOL(pil_q6v5_reset);
+
+struct q6v5_data *pil_q6v5_init(struct platform_device *pdev)
+{
+	struct q6v5_data *drv;
+	struct resource *res;
+	struct pil_desc *desc;
+	struct property *prop;
+	int ret, vdd_pll;
+
+	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+	if (!drv)
+		return ERR_PTR(-ENOMEM);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6_base");
+	drv->reg_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(drv->reg_base))
+		return ERR_CAST(drv->reg_base);
+
+	desc = &drv->desc;
+	ret = of_property_read_string(pdev->dev.of_node, "qcom,firmware-name",
+				      &desc->name);
+	if (ret)
+		return ERR_PTR(ret);
+
+	desc->dev = &pdev->dev;
+
+	drv->qdsp6v5_2_0 = of_device_is_compatible(pdev->dev.of_node,
+						   "qcom,pil-femto-modem");
+
+	if (drv->qdsp6v5_2_0)
+		return drv;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "halt_base");
+	if (res) {
+		drv->axi_halt_base = devm_ioremap(&pdev->dev, res->start,
+							resource_size(res));
+		if (!drv->axi_halt_base) {
+			dev_err(&pdev->dev, "Failed to map axi_halt_base.\n");
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	if (!drv->axi_halt_base) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+								"halt_q6");
+		if (res) {
+			drv->axi_halt_q6 = devm_ioremap(&pdev->dev,
+					res->start, resource_size(res));
+			if (!drv->axi_halt_q6) {
+				dev_err(&pdev->dev, "Failed to map axi_halt_q6.\n");
+				return ERR_PTR(-ENOMEM);
+			}
+		}
+
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+								"halt_modem");
+		if (res) {
+			drv->axi_halt_mss = devm_ioremap(&pdev->dev,
+					res->start, resource_size(res));
+			if (!drv->axi_halt_mss) {
+				dev_err(&pdev->dev, "Failed to map axi_halt_mss.\n");
+				return ERR_PTR(-ENOMEM);
+			}
+		}
+
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+								"halt_nc");
+		if (res) {
+			drv->axi_halt_nc = devm_ioremap(&pdev->dev,
+					res->start, resource_size(res));
+			if (!drv->axi_halt_nc) {
+				dev_err(&pdev->dev, "Failed to map axi_halt_nc.\n");
+				return ERR_PTR(-ENOMEM);
+			}
+		}
+	}
+
+	if (!(drv->axi_halt_base || (drv->axi_halt_q6 && drv->axi_halt_mss
+					&& drv->axi_halt_nc))) {
+		dev_err(&pdev->dev, "halt bases for Q6 are not defined.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	drv->qdsp6v55 = of_device_is_compatible(pdev->dev.of_node,
+						"qcom,pil-q6v55-mss");
+	drv->qdsp6v56 = of_device_is_compatible(pdev->dev.of_node,
+						"qcom,pil-q6v56-mss");
+
+	drv->qdsp6v56_1_3 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v56-1-3");
+	drv->qdsp6v56_1_5 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v56-1-5");
+
+	drv->qdsp6v56_1_8 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v56-1-8");
+	drv->qdsp6v56_1_10 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v56-1-10");
+
+	drv->qdsp6v56_1_8_inrush_current = of_property_read_bool(
+						pdev->dev.of_node,
+						"qcom,qdsp6v56-1-8-inrush-current");
+
+	drv->qdsp6v61_1_1 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v61-1-1");
+
+	drv->qdsp6v62_1_2 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v62-1-2");
+
+	drv->qdsp6v62_1_5 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v62-1-5");
+
+	drv->non_elf_image = of_property_read_bool(pdev->dev.of_node,
+						"qcom,mba-image-is-not-elf");
+
+	drv->override_acc = of_property_read_bool(pdev->dev.of_node,
+						"qcom,override-acc");
+
+	drv->ahb_clk_vote = of_property_read_bool(pdev->dev.of_node,
+						"qcom,ahb-clk-vote");
+	drv->mx_spike_wa = of_property_read_bool(pdev->dev.of_node,
+						"qcom,mx-spike-wa");
+
+	drv->xo = devm_clk_get(&pdev->dev, "xo");
+	if (IS_ERR(drv->xo))
+		return ERR_CAST(drv->xo);
+
+	if (of_property_read_bool(pdev->dev.of_node, "qcom,pnoc-clk-vote")) {
+		drv->pnoc_clk = devm_clk_get(&pdev->dev, "pnoc_clk");
+		if (IS_ERR(drv->pnoc_clk))
+			return ERR_CAST(drv->pnoc_clk);
+	} else {
+		drv->pnoc_clk = NULL;
+	}
+
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,proxy-clock-names", "qdss_clk") >= 0) {
+		drv->qdss_clk = devm_clk_get(&pdev->dev, "qdss_clk");
+		if (IS_ERR(drv->qdss_clk))
+			return ERR_CAST(drv->qdss_clk);
+	} else {
+		drv->qdss_clk = NULL;
+	}
+
+	drv->vreg_cx = devm_regulator_get(&pdev->dev, "vdd_cx");
+	if (IS_ERR(drv->vreg_cx))
+		return ERR_CAST(drv->vreg_cx);
+	prop = of_find_property(pdev->dev.of_node, "vdd_cx-voltage", NULL);
+	if (!prop) {
+		dev_err(&pdev->dev, "Missing vdd_cx-voltage property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	ret = of_property_read_u32(pdev->dev.of_node, "qcom,vdd_pll",
+		&vdd_pll);
+	if (!ret) {
+		drv->vreg_pll = devm_regulator_get(&pdev->dev, "vdd_pll");
+		if (!IS_ERR_OR_NULL(drv->vreg_pll)) {
+			ret = regulator_set_voltage(drv->vreg_pll, vdd_pll,
+							vdd_pll);
+			if (ret) {
+				dev_err(&pdev->dev, "Failed to set vdd_pll voltage(rc:%d)\n",
+									ret);
+				return ERR_PTR(ret);
+			}
+
+			ret = regulator_set_load(drv->vreg_pll, 10000);
+			if (ret < 0) {
+				dev_err(&pdev->dev, "Failed to set vdd_pll mode(rc:%d)\n",
+									ret);
+				return ERR_PTR(ret);
+			}
+		} else
+			drv->vreg_pll = NULL;
+	}
+
+	return drv;
+}
+EXPORT_SYMBOL(pil_q6v5_init);
diff --git a/drivers/soc/qcom/pil-q6v5.h b/drivers/soc/qcom/pil-q6v5.h
new file mode 100644
index 0000000..0bcce9a
--- /dev/null
+++ b/drivers/soc/qcom/pil-q6v5.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MSM_PIL_Q6V5_H
+#define __MSM_PIL_Q6V5_H
+
+#include "peripheral-loader.h"
+
+struct regulator;
+struct clk;
+struct pil_device;
+struct platform_device;
+
+struct q6v5_data {
+	void __iomem *reg_base;
+	void __iomem *rmb_base;
+	void __iomem *cxrail_bhs;  /* External BHS register */
+	struct clk *xo;		   /* XO clock source */
+	struct clk *pnoc_clk;	   /* PNOC bus clock source */
+	struct clk *ahb_clk;	   /* PIL access to registers */
+	struct clk *axi_clk;	   /* CPU access to memory */
+	struct clk *core_clk;	   /* CPU core */
+	struct clk *reg_clk;	   /* CPU access registers */
+	struct clk *gpll0_mss_clk; /* GPLL0 to MSS connection */
+	struct clk *rom_clk;	   /* Boot ROM */
+	struct clk *snoc_axi_clk;
+	struct clk *mnoc_axi_clk;
+	struct clk *qdss_clk;
+	void __iomem *axi_halt_base; /* Halt bases for q6, mss and
+				      * nc, all in the same 4K page
+				      */
+	void __iomem *axi_halt_q6;
+	void __iomem *axi_halt_mss;
+	void __iomem *axi_halt_nc;
+	void __iomem *restart_reg;
+	struct regulator *vreg;
+	struct regulator *vreg_cx;
+	struct regulator *vreg_mx;
+	struct regulator *vreg_pll;
+	bool is_booted;
+	struct pil_desc desc;
+	bool self_auth;
+	phys_addr_t mba_dp_phys;
+	void *mba_dp_virt;
+	size_t mba_dp_size;
+	size_t dp_size;
+	bool qdsp6v55;
+	bool qdsp6v5_2_0;
+	bool qdsp6v56;
+	bool qdsp6v56_1_3;
+	bool qdsp6v56_1_5;
+	bool qdsp6v56_1_8;
+	bool qdsp6v56_1_8_inrush_current;
+	bool qdsp6v56_1_10;
+	bool qdsp6v61_1_1;
+	bool qdsp6v62_1_2;
+	bool qdsp6v62_1_5;
+	bool non_elf_image;
+	bool restart_reg_sec;
+	bool override_acc;
+	int override_acc_1;
+	bool ahb_clk_vote;
+	bool mx_spike_wa;
+};
+
+int pil_q6v5_make_proxy_votes(struct pil_desc *pil);
+void pil_q6v5_remove_proxy_votes(struct pil_desc *pil);
+void pil_q6v5_halt_axi_port(struct pil_desc *pil, void __iomem *halt_base);
+void pil_q6v5_shutdown(struct pil_desc *pil);
+int pil_q6v5_reset(struct pil_desc *pil);
+void assert_clamps(struct pil_desc *pil);
+struct q6v5_data *pil_q6v5_init(struct platform_device *pdev);
+
+#endif
diff --git a/drivers/soc/qcom/ramdump.c b/drivers/soc/qcom/ramdump.c
new file mode 100644
index 0000000..f917ea9
--- /dev/null
+++ b/drivers/soc/qcom/ramdump.c
@@ -0,0 +1,411 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/uaccess.h>
+#include <linux/elf.h>
+#include <linux/wait.h>
+#include <soc/qcom/ramdump.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+
+#define RAMDUMP_WAIT_MSECS	120000
+
+struct ramdump_device {
+	char name[256];
+
+	unsigned int data_ready;
+	unsigned int consumer_present;
+	int ramdump_status;
+
+	struct completion ramdump_complete;
+	struct miscdevice device;
+
+	wait_queue_head_t dump_wait_q;
+	int nsegments;
+	struct ramdump_segment *segments;
+	size_t elfcore_size;
+	char *elfcore_buf;
+	unsigned long attrs;
+	bool complete_ramdump;
+};
+
+static int ramdump_open(struct inode *inode, struct file *filep)
+{
+	struct ramdump_device *rd_dev = container_of(filep->private_data,
+				struct ramdump_device, device);
+	rd_dev->consumer_present = 1;
+	rd_dev->ramdump_status = 0;
+	return 0;
+}
+
+static int ramdump_release(struct inode *inode, struct file *filep)
+{
+	struct ramdump_device *rd_dev = container_of(filep->private_data,
+				struct ramdump_device, device);
+	rd_dev->consumer_present = 0;
+	rd_dev->data_ready = 0;
+	complete(&rd_dev->ramdump_complete);
+	return 0;
+}
+
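+/*
+ * Translate a file offset (already adjusted past the ELF header, if any)
+ * into the physical address of the segment it falls in. *data_left
+ * reports how many bytes remain in that segment; zero means the offset
+ * is past the last segment (EOF).
+ */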
+static unsigned long offset_translate(loff_t user_offset,
+		struct ramdump_device *rd_dev, unsigned long *data_left,
+		void **vaddr)
+{
+	int i = 0;
+	*vaddr = NULL;
+
+	for (i = 0; i < rd_dev->nsegments; i++)
+		if (user_offset >= rd_dev->segments[i].size)
+			user_offset -= rd_dev->segments[i].size;
+		else
+			break;
+
+	if (i == rd_dev->nsegments) {
+		pr_debug("Ramdump(%s): offset_translate returning zero\n",
+				rd_dev->name);
+		*data_left = 0;
+		return 0;
+	}
+
+	*data_left = rd_dev->segments[i].size - user_offset;
+
+	pr_debug("Ramdump(%s): Returning address: %llx, data_left = %ld\n",
+		rd_dev->name, rd_dev->segments[i].address + user_offset,
+		*data_left);
+
+	if (rd_dev->segments[i].v_address)
+		*vaddr = rd_dev->segments[i].v_address + user_offset;
+
+	return rd_dev->segments[i].address + user_offset;
+}
+
+#define MAX_IOREMAP_SIZE SZ_1M
+
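+/*
+ * Stream the dump to userspace: the synthesized ELF header (if any) is
+ * served first, then each segment is mapped at most MAX_IOREMAP_SIZE at
+ * a time and copied out through a bounce buffer that keeps the device
+ * accesses 8-byte aligned.
+ */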
+static ssize_t ramdump_read(struct file *filep, char __user *buf, size_t count,
+			loff_t *pos)
+{
+	struct ramdump_device *rd_dev = container_of(filep->private_data,
+				struct ramdump_device, device);
+	void *device_mem = NULL, *origdevice_mem = NULL, *vaddr = NULL;
+	unsigned long data_left = 0, bytes_before, bytes_after;
+	unsigned long addr = 0;
+	size_t copy_size = 0, alignsize;
+	unsigned char *alignbuf = NULL, *finalbuf = NULL;
+	int ret = 0;
+	loff_t orig_pos = *pos;
+
+	if ((filep->f_flags & O_NONBLOCK) && !rd_dev->data_ready)
+		return -EAGAIN;
+
+	ret = wait_event_interruptible(rd_dev->dump_wait_q, rd_dev->data_ready);
+	if (ret)
+		return ret;
+
+	if (*pos < rd_dev->elfcore_size) {
+		copy_size = rd_dev->elfcore_size - *pos;
+		copy_size = min(copy_size, count);
+
+		if (copy_to_user(buf, rd_dev->elfcore_buf + *pos, copy_size)) {
+			ret = -EFAULT;
+			goto ramdump_done;
+		}
+		*pos += copy_size;
+		count -= copy_size;
+		buf += copy_size;
+		if (count == 0)
+			return copy_size;
+	}
+
+	addr = offset_translate(*pos - rd_dev->elfcore_size, rd_dev,
+				&data_left, &vaddr);
+
+	/* EOF check */
+	if (data_left == 0) {
+		pr_debug("Ramdump(%s): Ramdump complete. %lld bytes read.",
+			rd_dev->name, *pos);
+		rd_dev->ramdump_status = 0;
+		ret = 0;
+		goto ramdump_done;
+	}
+
+	copy_size = min_t(size_t, count, (size_t)MAX_IOREMAP_SIZE);
+	copy_size = min_t(unsigned long, (unsigned long)copy_size, data_left);
+
+	rd_dev->attrs = 0;
+	rd_dev->attrs |= DMA_ATTR_SKIP_ZEROING;
+	device_mem = vaddr ?: dma_remap(rd_dev->device.parent, NULL, addr,
+						copy_size, rd_dev->attrs);
+	origdevice_mem = device_mem;
+
+	if (device_mem == NULL) {
+		pr_err("Ramdump(%s): Unable to ioremap: addr %lx, size %zd\n",
+			rd_dev->name, addr, copy_size);
+		rd_dev->ramdump_status = -1;
+		ret = -ENOMEM;
+		goto ramdump_done;
+	}
+
+	alignbuf = kzalloc(copy_size, GFP_KERNEL);
+	if (!alignbuf) {
+		pr_err("Ramdump(%s): Unable to alloc mem for aligned buf\n",
+				rd_dev->name);
+		rd_dev->ramdump_status = -1;
+		ret = -ENOMEM;
+		goto ramdump_done;
+	}
+
+	finalbuf = alignbuf;
+	alignsize = copy_size;
+
+	if ((unsigned long)device_mem & 0x7) {
+		bytes_before = 8 - ((unsigned long)device_mem & 0x7);
+		memcpy_fromio(alignbuf, device_mem, bytes_before);
+		device_mem += bytes_before;
+		alignbuf += bytes_before;
+		alignsize -= bytes_before;
+	}
+
+	if (alignsize & 0x7) {
+		bytes_after = alignsize & 0x7;
+		memcpy(alignbuf, device_mem, alignsize - bytes_after);
+		device_mem += alignsize - bytes_after;
+		alignbuf += (alignsize - bytes_after);
+		alignsize = bytes_after;
+		memcpy_fromio(alignbuf, device_mem, alignsize);
+	} else
+		memcpy(alignbuf, device_mem, alignsize);
+
+	if (copy_to_user(buf, finalbuf, copy_size)) {
+		pr_err("Ramdump(%s): Couldn't copy all data to user.",
+			rd_dev->name);
+		rd_dev->ramdump_status = -1;
+		ret = -EFAULT;
+		goto ramdump_done;
+	}
+
+	kfree(finalbuf);
+	if (!vaddr && origdevice_mem)
+		dma_unremap(rd_dev->device.parent, origdevice_mem, copy_size);
+
+	*pos += copy_size;
+
+	pr_debug("Ramdump(%s): Read %zd bytes from address %lx.",
+			rd_dev->name, copy_size, addr);
+
+	return *pos - orig_pos;
+
+ramdump_done:
+	if (!vaddr && origdevice_mem)
+		dma_unremap(rd_dev->device.parent, origdevice_mem, copy_size);
+
+	kfree(finalbuf);
+	rd_dev->data_ready = 0;
+	*pos = 0;
+	complete(&rd_dev->ramdump_complete);
+	return ret;
+}
+
+static unsigned int ramdump_poll(struct file *filep,
+					struct poll_table_struct *wait)
+{
+	struct ramdump_device *rd_dev = container_of(filep->private_data,
+				struct ramdump_device, device);
+	unsigned int mask = 0;
+
+	if (rd_dev->data_ready)
+		mask |= (POLLIN | POLLRDNORM);
+
+	poll_wait(filep, &rd_dev->dump_wait_q, wait);
+	return mask;
+}
+
+static const struct file_operations ramdump_file_ops = {
+	.open = ramdump_open,
+	.release = ramdump_release,
+	.read = ramdump_read,
+	.poll = ramdump_poll
+};
+
+void *create_ramdump_device(const char *dev_name, struct device *parent)
+{
+	int ret;
+	struct ramdump_device *rd_dev;
+
+	if (!dev_name) {
+		pr_err("%s: Invalid device name.\n", __func__);
+		return NULL;
+	}
+
+	rd_dev = kzalloc(sizeof(struct ramdump_device), GFP_KERNEL);
+
+	if (!rd_dev) {
+		pr_err("%s: Couldn't alloc space for ramdump device!",
+			__func__);
+		return NULL;
+	}
+
+	snprintf(rd_dev->name, ARRAY_SIZE(rd_dev->name), "ramdump_%s",
+		 dev_name);
+
+	init_completion(&rd_dev->ramdump_complete);
+
+	rd_dev->device.minor = MISC_DYNAMIC_MINOR;
+	rd_dev->device.name = rd_dev->name;
+	rd_dev->device.fops = &ramdump_file_ops;
+	rd_dev->device.parent = parent;
+	if (parent) {
+		rd_dev->complete_ramdump = of_property_read_bool(
+				parent->of_node, "qcom,complete-ramdump");
+		if (!rd_dev->complete_ramdump)
+			dev_info(parent,
+			"for %s, only the registered segments will be dumped.",
+			dev_name);
+	}
+
+	init_waitqueue_head(&rd_dev->dump_wait_q);
+
+	ret = misc_register(&rd_dev->device);
+
+	if (ret) {
+		pr_err("%s: misc_register failed for %s (%d)", __func__,
+				dev_name, ret);
+		kfree(rd_dev);
+		return NULL;
+	}
+
+	return (void *)rd_dev;
+}
+EXPORT_SYMBOL(create_ramdump_device);
+
+void destroy_ramdump_device(void *dev)
+{
+	struct ramdump_device *rd_dev = dev;
+
+	if (IS_ERR_OR_NULL(rd_dev))
+		return;
+
+	misc_deregister(&rd_dev->device);
+	kfree(rd_dev);
+}
+EXPORT_SYMBOL(destroy_ramdump_device);
+
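+/*
+ * Core dump flow: optionally build an ELF32 core header describing the
+ * segments, mark the data ready, wake any reader blocked in read() or
+ * poll(), then wait up to RAMDUMP_WAIT_MSECS for userspace to drain the
+ * dump before tearing the state down.
+ */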
+static int _do_ramdump(void *handle, struct ramdump_segment *segments,
+		int nsegments, bool use_elf)
+{
+	int ret, i;
+	struct ramdump_device *rd_dev = (struct ramdump_device *)handle;
+	Elf32_Phdr *phdr;
+	Elf32_Ehdr *ehdr;
+	unsigned long offset;
+
+	if (!rd_dev->consumer_present) {
+		pr_err("Ramdump(%s): No consumers. Aborting..\n", rd_dev->name);
+		return -EPIPE;
+	}
+
+	if (rd_dev->complete_ramdump) {
+		for (i = 0; i < nsegments-1; i++)
+			segments[i].size =
+			PAGE_ALIGN(segments[i+1].address - segments[i].address);
+
+		segments[nsegments-1].size =
+			PAGE_ALIGN(segments[nsegments-1].size);
+	} else {
+		for (i = 0; i < nsegments; i++)
+			segments[i].size = PAGE_ALIGN(segments[i].size);
+	}
+
+	rd_dev->segments = segments;
+	rd_dev->nsegments = nsegments;
+
+	if (use_elf) {
+		rd_dev->elfcore_size = sizeof(*ehdr) +
+				       sizeof(*phdr) * nsegments;
+		ehdr = kzalloc(rd_dev->elfcore_size, GFP_KERNEL);
+		rd_dev->elfcore_buf = (char *)ehdr;
+		if (!rd_dev->elfcore_buf)
+			return -ENOMEM;
+
+		memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
+		ehdr->e_ident[EI_CLASS] = ELFCLASS32;
+		ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
+		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
+		ehdr->e_ident[EI_OSABI] = ELFOSABI_NONE;
+		ehdr->e_type = ET_CORE;
+		ehdr->e_version = EV_CURRENT;
+		ehdr->e_phoff = sizeof(*ehdr);
+		ehdr->e_ehsize = sizeof(*ehdr);
+		ehdr->e_phentsize = sizeof(*phdr);
+		ehdr->e_phnum = nsegments;
+
+		offset = rd_dev->elfcore_size;
+		phdr = (Elf32_Phdr *)(ehdr + 1);
+		for (i = 0; i < nsegments; i++, phdr++) {
+			phdr->p_type = PT_LOAD;
+			phdr->p_offset = offset;
+			phdr->p_vaddr = phdr->p_paddr = segments[i].address;
+			phdr->p_filesz = phdr->p_memsz = segments[i].size;
+			phdr->p_flags = PF_R | PF_W | PF_X;
+			offset += phdr->p_filesz;
+		}
+	}
+
+	rd_dev->data_ready = 1;
+	rd_dev->ramdump_status = -1;
+
+	reinit_completion(&rd_dev->ramdump_complete);
+
+	/* Tell userspace that the data is ready */
+	wake_up(&rd_dev->dump_wait_q);
+
+	/* Wait (with a timeout) to let the ramdump complete */
+	ret = wait_for_completion_timeout(&rd_dev->ramdump_complete,
+			msecs_to_jiffies(RAMDUMP_WAIT_MSECS));
+
+	if (!ret) {
+		pr_err("Ramdump(%s): Timed out waiting for userspace.\n",
+			rd_dev->name);
+		ret = -EPIPE;
+	} else
+		ret = (rd_dev->ramdump_status == 0) ? 0 : -EPIPE;
+
+	rd_dev->data_ready = 0;
+	rd_dev->elfcore_size = 0;
+	kfree(rd_dev->elfcore_buf);
+	rd_dev->elfcore_buf = NULL;
+	return ret;
+}
+
+int do_ramdump(void *handle, struct ramdump_segment *segments, int nsegments)
+{
+	return _do_ramdump(handle, segments, nsegments, false);
+}
+EXPORT_SYMBOL(do_ramdump);
+
+int
+do_elf_ramdump(void *handle, struct ramdump_segment *segments, int nsegments)
+{
+	return _do_ramdump(handle, segments, nsegments, true);
+}
+EXPORT_SYMBOL(do_elf_ramdump);
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
new file mode 100644
index 0000000..a7d5d37
--- /dev/null
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -0,0 +1,1160 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+#include <linux/dma-mapping.h>
+
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/ramdump.h>
+#include <soc/qcom/scm.h>
+
+#include <soc/qcom/smem.h>
+
+#include "peripheral-loader.h"
+
+#define XO_FREQ			19200000
+#define PROXY_TIMEOUT_MS	10000
+#define MAX_SSR_REASON_LEN	81U
+#define STOP_ACK_TIMEOUT_MS	1000
+#define CRASH_STOP_ACK_TO_MS	200
+
+#define ERR_READY	0
+#define PBL_DONE	1
+
+#define desc_to_data(d) container_of(d, struct pil_tz_data, desc)
+#define subsys_to_data(d) container_of(d, struct pil_tz_data, subsys_desc)
+
+/**
+ * struct reg_info - regulator info
+ * @reg: regulator handle
+ * @uV: voltage in uV
+ * @uA: current in uA
+ */
+struct reg_info {
+	struct regulator *reg;
+	int uV;
+	int uA;
+};
+
+/**
+ * struct pil_tz_data
+ * @regs: regulators that should be always on when the subsystem is
+ *	   brought out of reset
+ * @proxy_regs: regulators that should be on during pil proxy voting
+ * @clks: clocks that should be always on when the subsystem is
+ *	  brought out of reset
+ * @proxy_clks: clocks that should be on during pil proxy voting
+ * @reg_count: the number of always on regulators
+ * @proxy_reg_count: the number of proxy voting regulators
+ * @clk_count: the number of always on clocks
+ * @proxy_clk_count: the number of proxy voting clocks
+ * @smem_id: the smem id used to read the subsystem crash reason
+ * @ramdump_dev: ramdump device pointer
+ * @pas_id: the PAS id for tz
+ * @bus_client: bus client id
+ * @enable_bus_scaling: set to true if PIL needs to vote for
+ *			bus bandwidth
+ * @keep_proxy_regs_on: If set, during proxy unvoting, PIL removes the
+ *			voltage/current vote for proxy regulators but leaves
+ *			them enabled.
+ * @stop_ack: completion used to wait for the subsystem's stop ack
+ * @desc: PIL descriptor
+ * @subsys: subsystem device pointer
+ * @subsys_desc: subsystem descriptor
+ * @bits_arr: bit positions of ERR_READY and PBL_DONE in the SCSR registers
+ */
+struct pil_tz_data {
+	struct reg_info *regs;
+	struct reg_info *proxy_regs;
+	struct clk **clks;
+	struct clk **proxy_clks;
+	int reg_count;
+	int proxy_reg_count;
+	int clk_count;
+	int proxy_clk_count;
+	int smem_id;
+	void *ramdump_dev;
+	u32 pas_id;
+	u32 bus_client;
+	bool enable_bus_scaling;
+	bool keep_proxy_regs_on;
+	struct completion stop_ack;
+	struct pil_desc desc;
+	struct subsys_device *subsys;
+	struct subsys_desc subsys_desc;
+	void __iomem *irq_status;
+	void __iomem *irq_clear;
+	void __iomem *irq_mask;
+	void __iomem *err_status;
+	void __iomem *err_status_spare;
+	u32 bits_arr[2];
+};
+
+enum scm_cmd {
+	PAS_INIT_IMAGE_CMD = 1,
+	PAS_MEM_SETUP_CMD,
+	PAS_AUTH_AND_RESET_CMD = 5,
+	PAS_SHUTDOWN_CMD,
+};
+
+enum pas_id {
+	PAS_MODEM,
+	PAS_Q6,
+	PAS_DSPS,
+	PAS_TZAPPS,
+	PAS_MODEM_SW,
+	PAS_MODEM_FW,
+	PAS_WCNSS,
+	PAS_SECAPP,
+	PAS_GSS,
+	PAS_VIDC,
+	PAS_VPU,
+	PAS_BCSS,
+};
+
+static struct msm_bus_paths scm_pas_bw_tbl[] = {
+	{
+		.vectors = (struct msm_bus_vectors[]){
+			{
+				.src = MSM_BUS_MASTER_SPS,
+				.dst = MSM_BUS_SLAVE_EBI_CH0,
+			},
+		},
+		.num_paths = 1,
+	},
+	{
+		.vectors = (struct msm_bus_vectors[]){
+			{
+				.src = MSM_BUS_MASTER_SPS,
+				.dst = MSM_BUS_SLAVE_EBI_CH0,
+				.ib = 492 * 8 * 1000000UL,
+				.ab = 492 * 8 *  100000UL,
+			},
+		},
+		.num_paths = 1,
+	},
+};
+
+static struct msm_bus_scale_pdata scm_pas_bus_pdata = {
+	.usecase = scm_pas_bw_tbl,
+	.num_usecases = ARRAY_SIZE(scm_pas_bw_tbl),
+	.name = "scm_pas",
+};
+
+static uint32_t scm_perf_client;
+static int scm_pas_bw_count;
+static DEFINE_MUTEX(scm_pas_bw_mutex);
+
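+/*
+ * Bus bandwidth for TZ's PAS operations is refcounted: the first enable
+ * requests the high-bandwidth usecase and the last disable drops the
+ * vote back to the idle vector.
+ */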
+static int scm_pas_enable_bw(void)
+{
+	int ret = 0;
+
+	if (!scm_perf_client)
+		return -EINVAL;
+
+	mutex_lock(&scm_pas_bw_mutex);
+	if (!scm_pas_bw_count) {
+		ret = msm_bus_scale_client_update_request(scm_perf_client, 1);
+		if (ret)
+			goto err_bus;
+	}
+	scm_pas_bw_count++;
+
+	mutex_unlock(&scm_pas_bw_mutex);
+	return ret;
+
+err_bus:
+	pr_err("scm-pas; Bandwidth request failed (%d)\n", ret);
+	msm_bus_scale_client_update_request(scm_perf_client, 0);
+
+	mutex_unlock(&scm_pas_bw_mutex);
+	return ret;
+}
+
+static void scm_pas_disable_bw(void)
+{
+	mutex_lock(&scm_pas_bw_mutex);
+	if (scm_pas_bw_count-- == 1)
+		msm_bus_scale_client_update_request(scm_perf_client, 0);
+	mutex_unlock(&scm_pas_bw_mutex);
+}
+
+static void scm_pas_init(int id)
+{
+	static int is_inited;
+
+	if (is_inited)
+		return;
+
+	scm_pas_bw_tbl[0].vectors[0].src = id;
+	scm_pas_bw_tbl[1].vectors[0].src = id;
+
+	scm_perf_client = msm_bus_scale_register_client(&scm_pas_bus_pdata);
+	if (!scm_perf_client)
+		pr_warn("scm-pas: Unable to register bus client\n");
+
+	is_inited = 1;
+}
+
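+/*
+ * Parse @propname as a list of clock names. Each clock is looked up by
+ * name and, if a matching "qcom,<name>-freq" property exists, its rate
+ * is set (defaulting to XO_FREQ). Returns the clock count on success or
+ * a negative errno.
+ */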
+static int of_read_clocks(struct device *dev, struct clk ***clks_ref,
+			  const char *propname)
+{
+	long clk_count;
+	int i, len;
+	struct clk **clks;
+
+	if (!of_find_property(dev->of_node, propname, &len))
+		return 0;
+
+	clk_count = of_property_count_strings(dev->of_node, propname);
+	if (IS_ERR_VALUE(clk_count)) {
+		dev_err(dev, "Failed to get clock names\n");
+		return -EINVAL;
+	}
+
+	clks = devm_kzalloc(dev, sizeof(struct clk *) * clk_count,
+				GFP_KERNEL);
+	if (!clks)
+		return -ENOMEM;
+
+	for (i = 0; i < clk_count; i++) {
+		const char *clock_name;
+		char clock_freq_name[50];
+		u32 clock_rate = XO_FREQ;
+
+		of_property_read_string_index(dev->of_node,
+					      propname, i,
+					      &clock_name);
+		snprintf(clock_freq_name, ARRAY_SIZE(clock_freq_name),
+						"qcom,%s-freq", clock_name);
+		if (of_find_property(dev->of_node, clock_freq_name, &len))
+			if (of_property_read_u32(dev->of_node, clock_freq_name,
+								&clock_rate)) {
+				dev_err(dev, "Failed to read %s clock's freq\n",
+							clock_freq_name);
+				return -EINVAL;
+			}
+
+		clks[i] = devm_clk_get(dev, clock_name);
+		if (IS_ERR(clks[i])) {
+			int rc = PTR_ERR(clks[i]);
+
+			if (rc != -EPROBE_DEFER)
+				dev_err(dev, "Failed to get %s clock\n",
+								clock_name);
+			return rc;
+		}
+
+		/* Make sure rate-settable clocks' rates are set */
+		if (clk_get_rate(clks[i]) == 0)
+			clk_set_rate(clks[i], clk_round_rate(clks[i],
+								clock_rate));
+	}
+
+	*clks_ref = clks;
+	return clk_count;
+}
+
+static int of_read_regs(struct device *dev, struct reg_info **regs_ref,
+			const char *propname)
+{
+	long reg_count;
+	int i, len, rc;
+	struct reg_info *regs;
+
+	if (!of_find_property(dev->of_node, propname, &len))
+		return 0;
+
+	reg_count = of_property_count_strings(dev->of_node, propname);
+	if (IS_ERR_VALUE(reg_count)) {
+		dev_err(dev, "Failed to get regulator names\n");
+		return -EINVAL;
+	}
+
+	regs = devm_kzalloc(dev, sizeof(struct reg_info) * reg_count,
+				GFP_KERNEL);
+	if (!regs)
+		return -ENOMEM;
+
+	for (i = 0; i < reg_count; i++) {
+		const char *reg_name;
+		char reg_uV_uA_name[50];
+		u32 vdd_uV_uA[2];
+
+		of_property_read_string_index(dev->of_node,
+					      propname, i,
+					      &reg_name);
+
+		regs[i].reg = devm_regulator_get(dev, reg_name);
+		if (IS_ERR(regs[i].reg)) {
+			int rc = PTR_ERR(regs[i].reg);
+
+			if (rc != -EPROBE_DEFER)
+				dev_err(dev, "Failed to get %s\n regulator",
+								reg_name);
+			return rc;
+		}
+
+		/*
+		 * Read the voltage and current values for the corresponding
+		 * regulator. The device tree property name is "qcom," +
+		 *  "regulator_name" + "-uV-uA".
+		 */
+		rc = snprintf(reg_uV_uA_name, ARRAY_SIZE(reg_uV_uA_name),
+			 "qcom,%s-uV-uA", reg_name);
+		if (rc < strlen(reg_name) + 6) {
+			dev_err(dev, "Failed to hold reg_uV_uA_name\n");
+			return -EINVAL;
+		}
+
+		if (!of_find_property(dev->of_node, reg_uV_uA_name, &len))
+			continue;
+
+		len /= sizeof(vdd_uV_uA[0]);
+
+		/* There should be two entries: one for uV and one for uA */
+		if (len != 2) {
+			dev_err(dev, "Missing uV/uA value\n");
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32_array(dev->of_node, reg_uV_uA_name,
+					vdd_uV_uA, len);
+		if (rc) {
+			dev_err(dev, "Failed to read uV/uA values(rc:%d)\n",
+									rc);
+			return rc;
+		}
+
+		regs[i].uV = vdd_uV_uA[0];
+		regs[i].uA = vdd_uV_uA[1];
+	}
+
+	*regs_ref = regs;
+	return reg_count;
+}
+
+static int of_read_bus_pdata(struct platform_device *pdev,
+			     struct pil_tz_data *d)
+{
+	struct msm_bus_scale_pdata *pdata;
+
+	pdata = msm_bus_cl_get_pdata(pdev);
+
+	if (!pdata)
+		return -EINVAL;
+
+	d->bus_client = msm_bus_scale_register_client(pdata);
+	if (!d->bus_client)
+		pr_warn("%s: Unable to register bus client\n", __func__);
+
+	return 0;
+}
+
+static int piltz_resc_init(struct platform_device *pdev, struct pil_tz_data *d)
+{
+	int len, count, rc;
+	struct device *dev = &pdev->dev;
+
+	count = of_read_clocks(dev, &d->clks, "qcom,active-clock-names");
+	if (count < 0) {
+		dev_err(dev, "Failed to setup clocks.\n");
+		return count;
+	}
+	d->clk_count = count;
+
+	count = of_read_clocks(dev, &d->proxy_clks, "qcom,proxy-clock-names");
+	if (count < 0) {
+		dev_err(dev, "Failed to setup proxy clocks.\n");
+		return count;
+	}
+	d->proxy_clk_count = count;
+
+	count = of_read_regs(dev, &d->regs, "qcom,active-reg-names");
+	if (count < 0) {
+		dev_err(dev, "Failed to setup regulators.\n");
+		return count;
+	}
+	d->reg_count = count;
+
+	count = of_read_regs(dev, &d->proxy_regs, "qcom,proxy-reg-names");
+	if (count < 0) {
+		dev_err(dev, "Failed to setup proxy regulators.\n");
+		return count;
+	}
+	d->proxy_reg_count = count;
+
+	if (of_find_property(dev->of_node, "qcom,msm-bus,name", &len)) {
+		d->enable_bus_scaling = true;
+		rc = of_read_bus_pdata(pdev, d);
+		if (rc) {
+			dev_err(dev, "Failed to setup bus scaling client.\n");
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
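+/*
+ * Apply voltage and load votes and enable each regulator, unwinding
+ * everything already touched on failure. When keep_proxy_regs_on is set
+ * and @reg_no_enable is true, only the votes are updated and the enable
+ * step is skipped, since those rails are deliberately left on.
+ */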
+static int enable_regulators(struct pil_tz_data *d, struct device *dev,
+				struct reg_info *regs, int reg_count,
+				bool reg_no_enable)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < reg_count; i++) {
+		if (regs[i].uV > 0) {
+			rc = regulator_set_voltage(regs[i].reg,
+					regs[i].uV, INT_MAX);
+			if (rc) {
+				dev_err(dev, "Failed to request voltage(rc:%d)\n",
+									rc);
+				goto err_voltage;
+			}
+		}
+
+		if (regs[i].uA > 0) {
+			rc = regulator_set_load(regs[i].reg,
+						regs[i].uA);
+			if (rc < 0) {
+				dev_err(dev, "Failed to set regulator mode(rc:%d)\n",
+									rc);
+				goto err_mode;
+			}
+		}
+
+		if (d->keep_proxy_regs_on && reg_no_enable)
+			continue;
+
+		rc = regulator_enable(regs[i].reg);
+		if (rc) {
+			dev_err(dev, "Regulator enable failed(rc:%d)\n", rc);
+			goto err_enable;
+		}
+	}
+
+	return 0;
+err_enable:
+	if (regs[i].uA > 0) {
+		regulator_set_voltage(regs[i].reg, 0, INT_MAX);
+		regulator_set_load(regs[i].reg, 0);
+	}
+err_mode:
+	if (regs[i].uV > 0)
+		regulator_set_voltage(regs[i].reg, 0, INT_MAX);
+err_voltage:
+	for (i--; i >= 0; i--) {
+		if (regs[i].uV > 0)
+			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
+
+		if (regs[i].uA > 0)
+			regulator_set_load(regs[i].reg, 0);
+
+		if (d->keep_proxy_regs_on && reg_no_enable)
+			continue;
+		regulator_disable(regs[i].reg);
+	}
+
+	return rc;
+}
+
+static void disable_regulators(struct pil_tz_data *d, struct reg_info *regs,
+					int reg_count, bool reg_no_disable)
+{
+	int i;
+
+	for (i = 0; i < reg_count; i++) {
+		if (regs[i].uV > 0)
+			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
+
+		if (regs[i].uA > 0)
+			regulator_set_load(regs[i].reg, 0);
+
+		if (d->keep_proxy_regs_on && reg_no_disable)
+			continue;
+		regulator_disable(regs[i].reg);
+	}
+}
+
+static int prepare_enable_clocks(struct device *dev, struct clk **clks,
+								int clk_count)
+{
+	int rc = 0;
+	int i;
+
+	for (i = 0; i < clk_count; i++) {
+		rc = clk_prepare_enable(clks[i]);
+		if (rc) {
+			dev_err(dev, "Clock enable failed(rc:%d)\n", rc);
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	for (i--; i >= 0; i--)
+		clk_disable_unprepare(clks[i]);
+
+	return rc;
+}
+
+static void disable_unprepare_clocks(struct clk **clks, int clk_count)
+{
+	int i;
+
+	for (i = --clk_count; i >= 0; i--)
+		clk_disable_unprepare(clks[i]);
+}
+
+static int pil_make_proxy_vote(struct pil_desc *pil)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+	int rc;
+
+	if (d->subsys_desc.no_auth)
+		return 0;
+
+	rc = enable_regulators(d, pil->dev, d->proxy_regs,
+					d->proxy_reg_count, false);
+	if (rc)
+		return rc;
+
+	rc = prepare_enable_clocks(pil->dev, d->proxy_clks,
+							d->proxy_clk_count);
+	if (rc)
+		goto err_clks;
+
+	if (d->bus_client) {
+		rc = msm_bus_scale_client_update_request(d->bus_client, 1);
+		if (rc) {
+			dev_err(pil->dev, "bandwidth request failed(rc:%d)\n",
+									rc);
+			goto err_bw;
+		}
+	} else
+		WARN(d->enable_bus_scaling, "Bus scaling not set up for %s!\n",
+					d->subsys_desc.name);
+
+	return 0;
+err_bw:
+	disable_unprepare_clocks(d->proxy_clks, d->proxy_clk_count);
+err_clks:
+	disable_regulators(d, d->proxy_regs, d->proxy_reg_count, false);
+
+	return rc;
+}
+
+static void pil_remove_proxy_vote(struct pil_desc *pil)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+
+	if (d->subsys_desc.no_auth)
+		return;
+
+	if (d->bus_client)
+		msm_bus_scale_client_update_request(d->bus_client, 0);
+	else
+		WARN(d->enable_bus_scaling, "Bus scaling not set up for %s!\n",
+					d->subsys_desc.name);
+
+	disable_unprepare_clocks(d->proxy_clks, d->proxy_clk_count);
+
+	disable_regulators(d, d->proxy_regs, d->proxy_reg_count, true);
+}
+
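+/*
+ * Hand the image metadata to TZ for authentication: the blob is copied
+ * into a strongly-ordered DMA buffer and its physical address is passed
+ * through the PAS_INIT_IMAGE_CMD SCM call (scm_call2 on ARMv8 TZ).
+ */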
+static int pil_init_image_trusted(struct pil_desc *pil,
+		const u8 *metadata, size_t size)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+	struct pas_init_image_req {
+		u32	proc;
+		u32	image_addr;
+	} request;
+	u32 scm_ret = 0;
+	void *mdata_buf;
+	dma_addr_t mdata_phys;
+	int ret;
+	unsigned long attrs = 0;
+	struct device dev = {0};
+	struct scm_desc desc = {0};
+
+	if (d->subsys_desc.no_auth)
+		return 0;
+
+	ret = scm_pas_enable_bw();
+	if (ret)
+		return ret;
+	arch_setup_dma_ops(&dev, 0, 0, NULL, 0);
+
+	dev.coherent_dma_mask =
+		DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+	attrs |= DMA_ATTR_STRONGLY_ORDERED;
+	mdata_buf = dma_alloc_attrs(&dev, size, &mdata_phys, GFP_KERNEL,
+					attrs);
+	if (!mdata_buf) {
+		pr_err("scm-pas: Allocation for metadata failed.\n");
+		scm_pas_disable_bw();
+		return -ENOMEM;
+	}
+
+	memcpy(mdata_buf, metadata, size);
+
+	request.proc = d->pas_id;
+	request.image_addr = mdata_phys;
+
+	if (!is_scm_armv8()) {
+		ret = scm_call(SCM_SVC_PIL, PAS_INIT_IMAGE_CMD, &request,
+				sizeof(request), &scm_ret, sizeof(scm_ret));
+	} else {
+		desc.args[0] = d->pas_id;
+		desc.args[1] = mdata_phys;
+		desc.arginfo = SCM_ARGS(2, SCM_VAL, SCM_RW);
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL, PAS_INIT_IMAGE_CMD),
+				&desc);
+		scm_ret = desc.ret[0];
+	}
+
+	dma_free_attrs(&dev, size, mdata_buf, mdata_phys, attrs);
+	scm_pas_disable_bw();
+	if (ret)
+		return ret;
+	return scm_ret;
+}
+
+static int pil_mem_setup_trusted(struct pil_desc *pil, phys_addr_t addr,
+			       size_t size)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+	struct pas_init_image_req {
+		u32	proc;
+		u32	start_addr;
+		u32	len;
+	} request;
+	u32 scm_ret = 0;
+	int ret;
+	struct scm_desc desc = {0};
+
+	if (d->subsys_desc.no_auth)
+		return 0;
+
+	request.proc = d->pas_id;
+	request.start_addr = addr;
+	request.len = size;
+
+	if (!is_scm_armv8()) {
+		ret = scm_call(SCM_SVC_PIL, PAS_MEM_SETUP_CMD, &request,
+				sizeof(request), &scm_ret, sizeof(scm_ret));
+	} else {
+		desc.args[0] = d->pas_id;
+		desc.args[1] = addr;
+		desc.args[2] = size;
+		desc.arginfo = SCM_ARGS(3);
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL, PAS_MEM_SETUP_CMD),
+				&desc);
+		scm_ret = desc.ret[0];
+	}
+	if (ret)
+		return ret;
+	return scm_ret;
+}
+
+static int pil_auth_and_reset(struct pil_desc *pil)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+	int rc;
+	u32 proc, scm_ret = 0;
+	struct scm_desc desc = {0};
+
+	if (d->subsys_desc.no_auth)
+		return 0;
+
+	desc.args[0] = proc = d->pas_id;
+	desc.arginfo = SCM_ARGS(1);
+
+	rc = enable_regulators(d, pil->dev, d->regs, d->reg_count, false);
+	if (rc)
+		return rc;
+
+	rc = prepare_enable_clocks(pil->dev, d->clks, d->clk_count);
+	if (rc)
+		goto err_clks;
+
+	rc = scm_pas_enable_bw();
+	if (rc)
+		goto err_reset;
+
+	if (!is_scm_armv8()) {
+		rc = scm_call(SCM_SVC_PIL, PAS_AUTH_AND_RESET_CMD, &proc,
+				sizeof(proc), &scm_ret, sizeof(scm_ret));
+	} else {
+		rc = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
+			       PAS_AUTH_AND_RESET_CMD), &desc);
+		scm_ret = desc.ret[0];
+	}
+	scm_pas_disable_bw();
+	if (rc)
+		goto err_reset;
+
+	return scm_ret;
+err_reset:
+	disable_unprepare_clocks(d->clks, d->clk_count);
+err_clks:
+	disable_regulators(d, d->regs, d->reg_count, false);
+
+	return rc;
+}
+
+static int pil_shutdown_trusted(struct pil_desc *pil)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+	u32 proc, scm_ret = 0;
+	int rc;
+	struct scm_desc desc = {0};
+
+	if (d->subsys_desc.no_auth)
+		return 0;
+
+	desc.args[0] = proc = d->pas_id;
+	desc.arginfo = SCM_ARGS(1);
+
+	rc = enable_regulators(d, pil->dev, d->proxy_regs,
+					d->proxy_reg_count, true);
+	if (rc)
+		return rc;
+
+	rc = prepare_enable_clocks(pil->dev, d->proxy_clks,
+						d->proxy_clk_count);
+	if (rc)
+		goto err_clks;
+
+	if (!is_scm_armv8()) {
+		rc = scm_call(SCM_SVC_PIL, PAS_SHUTDOWN_CMD, &proc,
+			      sizeof(proc), &scm_ret, sizeof(scm_ret));
+	} else {
+		rc = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL, PAS_SHUTDOWN_CMD),
+			       &desc);
+		scm_ret = desc.ret[0];
+	}
+
+	disable_unprepare_clocks(d->proxy_clks, d->proxy_clk_count);
+	disable_regulators(d, d->proxy_regs, d->proxy_reg_count, false);
+
+	if (rc)
+		return rc;
+
+	disable_unprepare_clocks(d->clks, d->clk_count);
+	disable_regulators(d, d->regs, d->reg_count, false);
+
+	return scm_ret;
+err_clks:
+	disable_regulators(d, d->proxy_regs, d->proxy_reg_count, false);
+	return rc;
+}
+
+static struct pil_reset_ops pil_ops_trusted = {
+	.init_image = pil_init_image_trusted,
+	.mem_setup =  pil_mem_setup_trusted,
+	.auth_and_reset = pil_auth_and_reset,
+	.shutdown = pil_shutdown_trusted,
+	.proxy_vote = pil_make_proxy_vote,
+	.proxy_unvote = pil_remove_proxy_vote,
+};
+
+static void log_failure_reason(const struct pil_tz_data *d)
+{
+	u32 size;
+	char *smem_reason, reason[MAX_SSR_REASON_LEN];
+	const char *name = d->subsys_desc.name;
+
+	if (d->smem_id == -1)
+		return;
+
+	smem_reason = smem_get_entry_no_rlock(d->smem_id, &size, 0,
+							SMEM_ANY_HOST_FLAG);
+	if (!smem_reason || !size) {
+		pr_err("%s SFR: (unknown, smem_get_entry_no_rlock failed).\n",
+									name);
+		return;
+	}
+	if (!smem_reason[0]) {
+		pr_err("%s SFR: (unknown, empty string found).\n", name);
+		return;
+	}
+
+	strlcpy(reason, smem_reason, min(size, MAX_SSR_REASON_LEN));
+	pr_err("%s subsystem failure reason: %s.\n", name, reason);
+
+	smem_reason[0] = '\0';
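+	/* Make sure the cleared SFR string hits SMEM before the restart */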
+	wmb();
+}
+
+static int subsys_shutdown(const struct subsys_desc *subsys, bool force_stop)
+{
+	struct pil_tz_data *d = subsys_to_data(subsys);
+	int ret;
+
+	if (!subsys_get_crash_status(d->subsys) && force_stop &&
+						subsys->force_stop_gpio) {
+		gpio_set_value(subsys->force_stop_gpio, 1);
+		ret = wait_for_completion_timeout(&d->stop_ack,
+				msecs_to_jiffies(STOP_ACK_TIMEOUT_MS));
+		if (!ret)
+			pr_warn("Timed out on stop ack from %s.\n",
+							subsys->name);
+		gpio_set_value(subsys->force_stop_gpio, 0);
+	}
+
+	pil_shutdown(&d->desc);
+	return 0;
+}
+
+static int subsys_powerup(const struct subsys_desc *subsys)
+{
+	struct pil_tz_data *d = subsys_to_data(subsys);
+	int ret = 0;
+
+	if (subsys->stop_ack_irq)
+		reinit_completion(&d->stop_ack);
+
+	d->desc.fw_name = subsys->fw_name;
+	ret = pil_boot(&d->desc);
+
+	return ret;
+}
+
+static int subsys_ramdump(int enable, const struct subsys_desc *subsys)
+{
+	struct pil_tz_data *d = subsys_to_data(subsys);
+
+	if (!enable)
+		return 0;
+
+	return pil_do_ramdump(&d->desc, d->ramdump_dev);
+}
+
+static void subsys_free_memory(const struct subsys_desc *subsys)
+{
+	struct pil_tz_data *d = subsys_to_data(subsys);
+
+	pil_free_memory(&d->desc);
+}
+
+static void subsys_crash_shutdown(const struct subsys_desc *subsys)
+{
+	struct pil_tz_data *d = subsys_to_data(subsys);
+
+	if (subsys->force_stop_gpio > 0 &&
+				!subsys_get_crash_status(d->subsys)) {
+		gpio_set_value(subsys->force_stop_gpio, 1);
+		mdelay(CRASH_STOP_ACK_TO_MS);
+	}
+}
+
+static irqreturn_t subsys_err_fatal_intr_handler(int irq, void *dev_id)
+{
+	struct pil_tz_data *d = subsys_to_data(dev_id);
+
+	pr_err("Fatal error on %s!\n", d->subsys_desc.name);
+	if (subsys_get_crash_status(d->subsys)) {
+		pr_err("%s: Ignoring error fatal, restart in progress\n",
+							d->subsys_desc.name);
+		return IRQ_HANDLED;
+	}
+	subsys_set_crash_status(d->subsys, true);
+	log_failure_reason(d);
+	subsystem_restart_dev(d->subsys);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t subsys_wdog_bite_irq_handler(int irq, void *dev_id)
+{
+	struct pil_tz_data *d = subsys_to_data(dev_id);
+
+	if (subsys_get_crash_status(d->subsys))
+		return IRQ_HANDLED;
+	pr_err("Watchdog bite received from %s!\n", d->subsys_desc.name);
+
+	if (d->subsys_desc.system_debug &&
+			!gpio_get_value(d->subsys_desc.err_fatal_gpio))
+		panic("%s: System ramdump requested. Triggering device restart!\n",
+							__func__);
+	subsys_set_crash_status(d->subsys, true);
+	log_failure_reason(d);
+	subsystem_restart_dev(d->subsys);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t subsys_stop_ack_intr_handler(int irq, void *dev_id)
+{
+	struct pil_tz_data *d = subsys_to_data(dev_id);
+
+	pr_info("Received stop ack interrupt from %s\n", d->subsys_desc.name);
+	complete(&d->stop_ack);
+	return IRQ_HANDLED;
+}
+
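+/*
+ * SCSR interrupt decode: bits_arr[] holds the ERR_READY and PBL_DONE bit
+ * positions in the status/clear registers. The spare error registers
+ * read below are assumed to sit contiguously just below err_status_spare,
+ * hence the -4 and -8 offsets.
+ */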
+static void check_pbl_done(struct pil_tz_data *d)
+{
+	uint32_t err_value;
+
+	err_value =  __raw_readl(d->err_status);
+	pr_debug("PBL_DONE received from %s!\n", d->subsys_desc.name);
+	if (err_value) {
+		uint32_t rmb_err_spare0;
+		uint32_t rmb_err_spare1;
+		uint32_t rmb_err_spare2;
+
+		rmb_err_spare2 = __raw_readl(d->err_status_spare);
+		rmb_err_spare1 = __raw_readl(d->err_status_spare - 4);
+		rmb_err_spare0 = __raw_readl(d->err_status_spare - 8);
+
+		pr_err("PBL error status register: 0x%08x\n", err_value);
+
+		pr_err("PBL error status spare0 register: 0x%08x\n",
+			rmb_err_spare0);
+		pr_err("PBL error status spare1 register: 0x%08x\n",
+			rmb_err_spare1);
+		pr_err("PBL error status spare2 register: 0x%08x\n",
+			rmb_err_spare2);
+	}
+	__raw_writel(BIT(d->bits_arr[PBL_DONE]), d->irq_clear);
+}
+
+static void check_err_ready(struct pil_tz_data *d)
+{
+	uint32_t err_value;
+
+	err_value =  __raw_readl(d->err_status_spare);
+	if (!err_value) {
+		pr_debug("Subsystem error services up received from %s!\n",
+							d->subsys_desc.name);
+		__raw_writel(BIT(d->bits_arr[ERR_READY]), d->irq_clear);
+		complete_err_ready(d->subsys);
+	} else if (err_value == 0x44554d50) {
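+		/* 0x44554d50 is the ASCII string "DUMP" */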
+		pr_err("wdog bite received from %s!\n", d->subsys_desc.name);
+		__raw_writel(BIT(d->bits_arr[ERR_READY]), d->irq_clear);
+		subsys_set_crash_status(d->subsys, true);
+		log_failure_reason(d);
+		subsystem_restart_dev(d->subsys);
+	}
+}
+
+static irqreturn_t subsys_generic_handler(int irq, void *dev_id)
+{
+	struct pil_tz_data *d = subsys_to_data(dev_id);
+	uint32_t status_val;
+
+	if (subsys_get_crash_status(d->subsys))
+		return IRQ_HANDLED;
+
+	status_val = __raw_readl(d->irq_status);
+
+	if (status_val & BIT(d->bits_arr[ERR_READY]))
+		check_err_ready(d);
+	else if (status_val & BIT(d->bits_arr[PBL_DONE]))
+		check_pbl_done(d);
+	return IRQ_HANDLED;
+}
+
+static void mask_scsr_irqs(struct pil_tz_data *d)
+{
+	uint32_t mask_val;
+	/* Masking all interrupts not handled by HLOS */
+	mask_val = ~0;
+	__raw_writel(mask_val & ~BIT(d->bits_arr[ERR_READY]) &
+			~BIT(d->bits_arr[PBL_DONE]), d->irq_mask);
+}
+
+static int pil_tz_driver_probe(struct platform_device *pdev)
+{
+	struct pil_tz_data *d;
+	struct resource *res;
+	u32 proxy_timeout;
+	int len, rc;
+
+	d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, d);
+
+	if (of_property_read_bool(pdev->dev.of_node, "qcom,pil-no-auth"))
+		d->subsys_desc.no_auth = true;
+
+	d->keep_proxy_regs_on = of_property_read_bool(pdev->dev.of_node,
+						"qcom,keep-proxy-regs-on");
+
+	rc = of_property_read_string(pdev->dev.of_node, "qcom,firmware-name",
+				      &d->desc.name);
+	if (rc)
+		return rc;
+
+	/* Default smem_id to -1, i.e. not present */
+	d->smem_id = -1;
+
+	if (of_find_property(pdev->dev.of_node, "qcom,smem-id", &len)) {
+		rc = of_property_read_u32(pdev->dev.of_node, "qcom,smem-id",
+						&d->smem_id);
+		if (rc) {
+			dev_err(&pdev->dev, "Failed to get the smem_id(rc:%d)\n",
+									rc);
+			return rc;
+		}
+	}
+
+	d->desc.dev = &pdev->dev;
+	d->desc.owner = THIS_MODULE;
+	d->desc.ops = &pil_ops_trusted;
+
+	d->desc.proxy_timeout = PROXY_TIMEOUT_MS;
+
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,proxy-timeout-ms",
+					&proxy_timeout);
+	if (!rc)
+		d->desc.proxy_timeout = proxy_timeout;
+
+	if (!d->subsys_desc.no_auth) {
+		rc = piltz_resc_init(pdev, d);
+		if (rc)
+			return -ENOENT;
+
+		rc = of_property_read_u32(pdev->dev.of_node, "qcom,pas-id",
+								&d->pas_id);
+		if (rc) {
+			dev_err(&pdev->dev, "Failed to find the pas_id(rc:%d)\n",
+									rc);
+			return rc;
+		}
+		scm_pas_init(MSM_BUS_MASTER_CRYPTO_CORE0);
+	}
+
+	rc = pil_desc_init(&d->desc);
+	if (rc)
+		return rc;
+
+	init_completion(&d->stop_ack);
+
+	d->subsys_desc.name = d->desc.name;
+	d->subsys_desc.owner = THIS_MODULE;
+	d->subsys_desc.dev = &pdev->dev;
+	d->subsys_desc.shutdown = subsys_shutdown;
+	d->subsys_desc.powerup = subsys_powerup;
+	d->subsys_desc.ramdump = subsys_ramdump;
+	d->subsys_desc.free_memory = subsys_free_memory;
+	d->subsys_desc.crash_shutdown = subsys_crash_shutdown;
+	if (of_property_read_bool(pdev->dev.of_node,
+					"qcom,pil-generic-irq-handler")) {
+		d->subsys_desc.generic_handler = subsys_generic_handler;
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"sp2soc_irq_status");
+		d->irq_status = devm_ioremap_resource(&pdev->dev, res);
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"sp2soc_irq_clr");
+		d->irq_clear = devm_ioremap_resource(&pdev->dev, res);
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"sp2soc_irq_mask");
+		d->irq_mask = devm_ioremap_resource(&pdev->dev, res);
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"rmb_err");
+		d->err_status = devm_ioremap_resource(&pdev->dev, res);
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"rmb_err_spare2");
+		d->err_status_spare = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(d->irq_status) || IS_ERR(d->irq_clear) ||
+				IS_ERR(d->irq_mask) || IS_ERR(d->err_status) ||
+				IS_ERR(d->err_status_spare)) {
+			dev_err(&pdev->dev, "Failed to map SCSR registers\n");
+			rc = -ENOMEM;
+			goto err_ramdump;
+		}
+		rc = of_property_read_u32_array(pdev->dev.of_node,
+				"qcom,spss-scsr-bits", d->bits_arr,
+				ARRAY_SIZE(d->bits_arr));
+		if (rc)
+			dev_err(&pdev->dev, "Failed to read qcom,spss-scsr-bits(rc:%d)\n",
+									rc);
+		mask_scsr_irqs(d);
+
+	} else {
+		d->subsys_desc.err_fatal_handler =
+						subsys_err_fatal_intr_handler;
+		d->subsys_desc.wdog_bite_handler = subsys_wdog_bite_irq_handler;
+		d->subsys_desc.stop_ack_handler = subsys_stop_ack_intr_handler;
+	}
+	d->ramdump_dev = create_ramdump_device(d->subsys_desc.name,
+								&pdev->dev);
+	if (!d->ramdump_dev) {
+		rc = -ENOMEM;
+		goto err_ramdump;
+	}
+
+	d->subsys = subsys_register(&d->subsys_desc);
+	if (IS_ERR(d->subsys)) {
+		rc = PTR_ERR(d->subsys);
+		goto err_subsys;
+	}
+
+	return 0;
+err_subsys:
+	destroy_ramdump_device(d->ramdump_dev);
+err_ramdump:
+	pil_desc_release(&d->desc);
+
+	return rc;
+}
+
+static int pil_tz_driver_exit(struct platform_device *pdev)
+{
+	struct pil_tz_data *d = platform_get_drvdata(pdev);
+
+	subsys_unregister(d->subsys);
+	destroy_ramdump_device(d->ramdump_dev);
+	pil_desc_release(&d->desc);
+
+	return 0;
+}
+
+static const struct of_device_id pil_tz_match_table[] = {
+	{.compatible = "qcom,pil-tz-generic"},
+	{}
+};
+
+static struct platform_driver pil_tz_driver = {
+	.probe = pil_tz_driver_probe,
+	.remove = pil_tz_driver_exit,
+	.driver = {
+		.name = "subsys-pil-tz",
+		.of_match_table = pil_tz_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init pil_tz_init(void)
+{
+	return platform_driver_register(&pil_tz_driver);
+}
+module_init(pil_tz_init);
+
+static void __exit pil_tz_exit(void)
+{
+	platform_driver_unregister(&pil_tz_driver);
+}
+module_exit(pil_tz_exit);
+
+MODULE_DESCRIPTION("Support for booting subsystems");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/subsystem_notif.c b/drivers/soc/qcom/subsystem_notif.c
new file mode 100644
index 0000000..f099dd5
--- /dev/null
+++ b/drivers/soc/qcom/subsystem_notif.c
@@ -0,0 +1,221 @@
+/* Copyright (c) 2011, 2013, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Subsystem Notifier -- Provides notifications
+ * of subsys events.
+ *
+ * Use subsys_notif_register_notifier to register for notifications
+ * and subsys_notif_queue_notification to send notifications.
+ *
+ */
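+
+/*
+ * Minimal usage sketch (illustrative only -- the "modem" name and the
+ * my_cb/my_nb identifiers below are hypothetical, not part of this file):
+ *
+ *	static int my_cb(struct notifier_block *nb, unsigned long code,
+ *			 void *data)
+ *	{
+ *		if (code == SUBSYS_BEFORE_SHUTDOWN)
+ *			pr_info("subsystem is going down\n");
+ *		return NOTIFY_DONE;
+ *	}
+ *
+ *	static struct notifier_block my_nb = { .notifier_call = my_cb };
+ *
+ *	handle = subsys_notif_register_notifier("modem", &my_nb);
+ */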
+
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/stringify.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <soc/qcom/subsystem_notif.h>
+
+
+struct subsys_notif_info {
+	char name[50];
+	struct srcu_notifier_head subsys_notif_rcvr_list;
+	struct list_head list;
+};
+
+static LIST_HEAD(subsystem_list);
+static DEFINE_MUTEX(notif_lock);
+static DEFINE_MUTEX(notif_add_lock);
+
+#if defined(SUBSYS_RESTART_DEBUG)
+static void subsys_notif_reg_test_notifier(const char *);
+#endif
+
+static struct subsys_notif_info *_notif_find_subsys(const char *subsys_name)
+{
+	struct subsys_notif_info *subsys;
+
+	mutex_lock(&notif_lock);
+	list_for_each_entry(subsys, &subsystem_list, list)
+		if (!strcmp(subsys->name, subsys_name)) {
+			mutex_unlock(&notif_lock);
+			return subsys;
+		}
+	mutex_unlock(&notif_lock);
+
+	return NULL;
+}
+
+void *subsys_notif_register_notifier(
+			const char *subsys_name, struct notifier_block *nb)
+{
+	int ret;
+	struct subsys_notif_info *subsys = _notif_find_subsys(subsys_name);
+
+	if (!subsys) {
+
+		/* Possible first time reference to this subsystem. Add it. */
+		subsys = (struct subsys_notif_info *)
+				subsys_notif_add_subsys(subsys_name);
+
+		if (IS_ERR_OR_NULL(subsys))
+			return ERR_PTR(-EINVAL);
+	}
+
+	ret = srcu_notifier_chain_register(
+		&subsys->subsys_notif_rcvr_list, nb);
+
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	return subsys;
+}
+EXPORT_SYMBOL(subsys_notif_register_notifier);
+
+int subsys_notif_unregister_notifier(void *subsys_handle,
+				struct notifier_block *nb)
+{
+	int ret;
+	struct subsys_notif_info *subsys =
+			(struct subsys_notif_info *)subsys_handle;
+
+	if (!subsys)
+		return -EINVAL;
+
+	ret = srcu_notifier_chain_unregister(
+		&subsys->subsys_notif_rcvr_list, nb);
+
+	return ret;
+}
+EXPORT_SYMBOL(subsys_notif_unregister_notifier);
+
+void *subsys_notif_add_subsys(const char *subsys_name)
+{
+	struct subsys_notif_info *subsys = NULL;
+
+	if (!subsys_name)
+		goto done;
+
+	mutex_lock(&notif_add_lock);
+
+	subsys = _notif_find_subsys(subsys_name);
+
+	if (subsys) {
+		mutex_unlock(&notif_add_lock);
+		goto done;
+	}
+
+	subsys = kmalloc(sizeof(struct subsys_notif_info), GFP_KERNEL);
+
+	if (!subsys) {
+		mutex_unlock(&notif_add_lock);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	strlcpy(subsys->name, subsys_name, ARRAY_SIZE(subsys->name));
+
+	srcu_init_notifier_head(&subsys->subsys_notif_rcvr_list);
+
+	INIT_LIST_HEAD(&subsys->list);
+
+	mutex_lock(&notif_lock);
+	list_add_tail(&subsys->list, &subsystem_list);
+	mutex_unlock(&notif_lock);
+
+#if defined(SUBSYS_RESTART_DEBUG)
+	subsys_notif_reg_test_notifier(subsys->name);
+#endif
+
+	mutex_unlock(&notif_add_lock);
+
+done:
+	return subsys;
+}
+EXPORT_SYMBOL(subsys_notif_add_subsys);
+
+int subsys_notif_queue_notification(void *subsys_handle,
+					enum subsys_notif_type notif_type,
+					void *data)
+{
+	int ret = 0;
+	struct subsys_notif_info *subsys =
+		(struct subsys_notif_info *) subsys_handle;
+
+	if (!subsys)
+		return -EINVAL;
+
+	if (notif_type < 0 || notif_type >= SUBSYS_NOTIF_TYPE_COUNT)
+		return -EINVAL;
+
+	ret = srcu_notifier_call_chain(
+		&subsys->subsys_notif_rcvr_list, notif_type, data);
+	return ret;
+}
+EXPORT_SYMBOL(subsys_notif_queue_notification);
+
+#if defined(SUBSYS_RESTART_DEBUG)
+static const char *notif_to_string(enum subsys_notif_type notif_type)
+{
+	switch (notif_type) {
+	case SUBSYS_BEFORE_SHUTDOWN:
+		return __stringify(SUBSYS_BEFORE_SHUTDOWN);
+	case SUBSYS_AFTER_SHUTDOWN:
+		return __stringify(SUBSYS_AFTER_SHUTDOWN);
+	case SUBSYS_BEFORE_POWERUP:
+		return __stringify(SUBSYS_BEFORE_POWERUP);
+	case SUBSYS_AFTER_POWERUP:
+		return __stringify(SUBSYS_AFTER_POWERUP);
+	default:
+		return "unknown";
+	}
+}
+
+static int subsys_notifier_test_call(struct notifier_block *this,
+				  unsigned long code,
+				  void *data)
+{
+	switch (code) {
+	default:
+		printk(KERN_WARNING "%s: Notification %s from subsystem %p\n",
+			__func__, notif_to_string(code), data);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block nb = {
+	.notifier_call = subsys_notifier_test_call,
+};
+
+static void subsys_notif_reg_test_notifier(const char *subsys_name)
+{
+	void *handle = subsys_notif_register_notifier(subsys_name, &nb);
+
+	printk(KERN_WARNING "%s: Registered test notifier, handle=%p",
+			__func__, handle);
+}
+#endif
+
+MODULE_DESCRIPTION("Subsystem Restart Notifier");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
new file mode 100644
index 0000000..c6e288e
--- /dev/null
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -0,0 +1,1861 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "subsys-restart: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/kthread.h>
+#include <linux/time.h>
+#include <linux/suspend.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/of_gpio.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/sysmon.h>
+
+#include <asm/current.h>
+
+#include "peripheral-loader.h"
+
+#define DISABLE_SSR 0x9889deed
+/* If set to 0x9889deed, call to subsystem_restart_dev() returns immediately */
+static uint disable_restart_work;
+module_param(disable_restart_work, uint, 0644);
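+/*
+ * For example, assuming this file is built in under its default name:
+ *	echo 0x9889deed > /sys/module/subsystem_restart/parameters/disable_restart_work
+ */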
+
+static int enable_debug;
+module_param(enable_debug, int, 0644);
+
+/* The maximum shutdown timeout is MAX_LOOPS * DELAY_MS = 100 * 100 ms = 10 s. */
+#define SHUTDOWN_ACK_MAX_LOOPS	100
+#define SHUTDOWN_ACK_DELAY_MS	100
+
+/**
+ * enum p_subsys_state - state of a subsystem (private)
+ * @SUBSYS_NORMAL: subsystem is operating normally
+ * @SUBSYS_CRASHED: subsystem has crashed and hasn't been shutdown
+ * @SUBSYS_RESTARTING: subsystem has been shutdown and is now restarting
+ *
+ * The 'private' side of the subsystem state used to determine where in the
+ * restart process the subsystem is.
+ */
+enum p_subsys_state {
+	SUBSYS_NORMAL,
+	SUBSYS_CRASHED,
+	SUBSYS_RESTARTING,
+};
+
+/**
+ * enum subsys_state - state of a subsystem (public)
+ * @SUBSYS_OFFLINING: subsystem is offlining
+ * @SUBSYS_OFFLINE: subsystem is offline
+ * @SUBSYS_ONLINE: subsystem is online
+ *
+ * The 'public' side of the subsystem state, exposed to userspace.
+ */
+enum subsys_state {
+	SUBSYS_OFFLINING,
+	SUBSYS_OFFLINE,
+	SUBSYS_ONLINE,
+};
+
+static const char * const subsys_states[] = {
+	[SUBSYS_OFFLINING] = "OFFLINING",
+	[SUBSYS_OFFLINE] = "OFFLINE",
+	[SUBSYS_ONLINE] = "ONLINE",
+};
+
+static const char * const restart_levels[] = {
+	[RESET_SOC] = "SYSTEM",
+	[RESET_SUBSYS_COUPLED] = "RELATED",
+};
+
+/**
+ * struct subsys_tracking - track state of a subsystem or restart order
+ * @p_state: private state of subsystem/order
+ * @state: public state of subsystem/order
+ * @s_lock: protects p_state
+ * @lock: protects subsystem/order callbacks and state
+ *
+ * Tracks the state of a subsystem or a set of subsystems (restart order).
+ * Doing this avoids the need to grab each subsystem's lock and update
+ * each subsystem's state when restarting an order.
+ */
+struct subsys_tracking {
+	enum p_subsys_state p_state;
+	spinlock_t s_lock;
+	enum subsys_state state;
+	struct mutex lock;
+};
+
+/**
+ * struct subsys_soc_restart_order - subsystem restart order
+ * @device_ptrs: device tree nodes of the subsystems in this restart order
+ * @count: number of subsystems in order
+ * @track: state tracking and locking
+ * @subsys_ptrs: pointers to subsystems in this restart order
+ * @list: entry in the global list of restart orders
+ */
+struct subsys_soc_restart_order {
+	struct device_node **device_ptrs;
+	int count;
+
+	struct subsys_tracking track;
+	struct subsys_device **subsys_ptrs;
+	struct list_head list;
+};
+
+struct restart_log {
+	struct timeval time;
+	struct subsys_device *dev;
+	struct list_head list;
+};
+
+/**
+ * struct subsys_device - subsystem device
+ * @desc: subsystem descriptor
+ * @work: context for subsystem_restart_wq_func() for this device
+ * @ssr_wlock: prevents suspend during subsystem_restart()
+ * @wlname: name of wakeup source
+ * @device_restart_work: work struct for device restart
+ * @track: state tracking and locking
+ * @notify: subsys notify handle
+ * @dev: device
+ * @owner: module that provides @desc
+ * @count: reference count of subsystem_get()/subsystem_put()
+ * @id: device ID allocated from subsys_ida
+ * @restart_level: restart level (RESET_SOC or RESET_SUBSYS_COUPLED)
+ * @restart_order: order of other devices this devices restarts with
+ * @crash_count: number of times the device has crashed
+ * @dentry: debugfs directory for this device
+ * @do_ramdump_on_put: ramdump on subsystem_put() if true
+ * @err_ready: completion variable to record error ready from subsystem
+ * @crashed: indicates if subsystem has crashed
+ * @notif_state: current state of subsystem in terms of subsys notifications
+ */
+struct subsys_device {
+	struct subsys_desc *desc;
+	struct work_struct work;
+	struct wakeup_source ssr_wlock;
+	char wlname[64];
+	struct work_struct device_restart_work;
+	struct subsys_tracking track;
+
+	void *notify;
+	struct device dev;
+	struct module *owner;
+	int count;
+	int id;
+	int restart_level;
+	int crash_count;
+	struct subsys_soc_restart_order *restart_order;
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *dentry;
+#endif
+	bool do_ramdump_on_put;
+	struct cdev char_dev;
+	dev_t dev_no;
+	struct completion err_ready;
+	bool crashed;
+	int notif_state;
+	struct list_head list;
+};
+
+static struct subsys_device *to_subsys(struct device *d)
+{
+	return container_of(d, struct subsys_device, dev);
+}
+
+void complete_err_ready(struct subsys_device *subsys)
+{
+	complete(&subsys->err_ready);
+}
+
+static struct subsys_tracking *subsys_get_track(struct subsys_device *subsys)
+{
+	struct subsys_soc_restart_order *order = subsys->restart_order;
+
+	if (order)
+		return &order->track;
+	else
+		return &subsys->track;
+}
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", to_subsys(dev)->desc->name);
+}
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	enum subsys_state state = to_subsys(dev)->track.state;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", subsys_states[state]);
+}
+
+static ssize_t crash_count_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", to_subsys(dev)->crash_count);
+}
+
+static ssize_t
+restart_level_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	int level = to_subsys(dev)->restart_level;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", restart_levels[level]);
+}
+
+static ssize_t restart_level_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+	const char *p;
+	int i, orig_count = count;
+
+	p = memchr(buf, '\n', count);
+	if (p)
+		count = p - buf;
+
+	for (i = 0; i < ARRAY_SIZE(restart_levels); i++)
+		if (!strncasecmp(buf, restart_levels[i], count)) {
+			subsys->restart_level = i;
+			return orig_count;
+		}
+	return -EPERM;
+}
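+
+/*
+ * Usage sketch: the level is selected by writing one of the strings from
+ * restart_levels[] to sysfs (the subsysN name is assigned at registration):
+ *	echo RELATED > /sys/bus/msm_subsys/devices/subsysN/restart_level
+ */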
+
+static ssize_t firmware_name_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", to_subsys(dev)->desc->fw_name);
+}
+
+static ssize_t firmware_name_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+	struct subsys_tracking *track = subsys_get_track(subsys);
+	const char *p;
+	int orig_count = count;
+
+	p = memchr(buf, '\n', count);
+	if (p)
+		count = p - buf;
+
+	pr_info("Changing subsys fw_name to %s\n", buf);
+	mutex_lock(&track->lock);
+	strlcpy(subsys->desc->fw_name, buf,
+			min(count + 1, sizeof(subsys->desc->fw_name)));
+	mutex_unlock(&track->lock);
+	return orig_count;
+}
+
+static ssize_t system_debug_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+	char p[6] = "set";
+
+	if (!subsys->desc->system_debug)
+		strlcpy(p, "reset", sizeof(p));
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", p);
+}
+
+static ssize_t system_debug_store(struct device *dev,
+				struct device_attribute *attr, const char *buf,
+				size_t count)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+	const char *p;
+	int orig_count = count;
+
+	p = memchr(buf, '\n', count);
+	if (p)
+		count = p - buf;
+
+	if (!strncasecmp(buf, "set", count))
+		subsys->desc->system_debug = true;
+	else if (!strncasecmp(buf, "reset", count))
+		subsys->desc->system_debug = false;
+	else
+		return -EPERM;
+	return orig_count;
+}
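+
+/*
+ * When system_debug is "set", a watchdog bite while the err-fatal gpio is
+ * low escalates to a full system ramdump via panic() rather than a plain
+ * subsystem restart (see subsys_wdog_bite_irq_handler in the subsys-pil-tz
+ * driver), e.g.:
+ *	echo set > /sys/bus/msm_subsys/devices/subsysN/system_debug
+ */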
+
+int subsys_get_restart_level(struct subsys_device *dev)
+{
+	return dev->restart_level;
+}
+EXPORT_SYMBOL(subsys_get_restart_level);
+
+static void subsys_set_state(struct subsys_device *subsys,
+			     enum subsys_state state)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&subsys->track.s_lock, flags);
+	if (subsys->track.state != state) {
+		subsys->track.state = state;
+		spin_unlock_irqrestore(&subsys->track.s_lock, flags);
+		sysfs_notify(&subsys->dev.kobj, NULL, "state");
+		return;
+	}
+	spin_unlock_irqrestore(&subsys->track.s_lock, flags);
+}
+
+/**
+ * subsys_default_online() - Mark a subsystem as online by default
+ * @dev: subsystem to mark as online
+ *
+ * Marks a subsystem as "online" without increasing the reference count
+ * on the subsystem. This is typically used by subsystems that are already
+ * online when the kernel boots up.
+ */
+void subsys_default_online(struct subsys_device *dev)
+{
+	subsys_set_state(dev, SUBSYS_ONLINE);
+}
+EXPORT_SYMBOL(subsys_default_online);
+
+static struct device_attribute subsys_attrs[] = {
+	__ATTR_RO(name),
+	__ATTR_RO(state),
+	__ATTR_RO(crash_count),
+	__ATTR(restart_level, 0644, restart_level_show, restart_level_store),
+	__ATTR(firmware_name, 0644, firmware_name_show, firmware_name_store),
+	__ATTR(system_debug, 0644, system_debug_show, system_debug_store),
+	__ATTR_NULL,
+};
+
+static struct bus_type subsys_bus_type = {
+	.name		= "msm_subsys",
+	.dev_attrs	= subsys_attrs,
+};
+
+static DEFINE_IDA(subsys_ida);
+
+static int enable_ramdumps;
+module_param(enable_ramdumps, int, 0644);
+
+static int enable_mini_ramdumps;
+module_param(enable_mini_ramdumps, int, 0644);
+
+struct workqueue_struct *ssr_wq;
+static struct class *char_class;
+
+static LIST_HEAD(restart_log_list);
+static LIST_HEAD(subsys_list);
+static LIST_HEAD(ssr_order_list);
+static DEFINE_MUTEX(soc_order_reg_lock);
+static DEFINE_MUTEX(restart_log_mutex);
+static DEFINE_MUTEX(subsys_list_lock);
+static DEFINE_MUTEX(char_device_lock);
+static DEFINE_MUTEX(ssr_order_mutex);
+
+static struct subsys_soc_restart_order *
+update_restart_order(struct subsys_device *dev)
+{
+	int i;
+	struct subsys_soc_restart_order *order;
+	struct device_node *device = dev->desc->dev->of_node;
+
+	mutex_lock(&soc_order_reg_lock);
+	list_for_each_entry(order, &ssr_order_list, list) {
+		for (i = 0; i < order->count; i++) {
+			if (order->device_ptrs[i] == device) {
+				order->subsys_ptrs[i] = dev;
+				goto found;
+			}
+		}
+	}
+	order = NULL;
+found:
+	mutex_unlock(&soc_order_reg_lock);
+
+	return order;
+}
+
+static int max_restarts;
+module_param(max_restarts, int, 0644);
+
+static long max_history_time = 3600;
+module_param(max_history_time, long, 0644);
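+/*
+ * For example, with max_restarts=3 and the default max_history_time=3600,
+ * do_epoch_check() panics if three subsystem crashes occur within an hour.
+ */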
+
+static void do_epoch_check(struct subsys_device *dev)
+{
+	int n = 0;
+	struct timeval *time_first = NULL, *curr_time;
+	struct restart_log *r_log, *temp;
+	static int max_restarts_check;
+	static long max_history_time_check;
+
+	mutex_lock(&restart_log_mutex);
+
+	max_restarts_check = max_restarts;
+	max_history_time_check = max_history_time;
+
+	/* Check if epoch checking is enabled */
+	if (!max_restarts_check)
+		goto out;
+
+	r_log = kmalloc(sizeof(struct restart_log), GFP_KERNEL);
+	if (!r_log)
+		goto out;
+	r_log->dev = dev;
+	do_gettimeofday(&r_log->time);
+	curr_time = &r_log->time;
+	INIT_LIST_HEAD(&r_log->list);
+
+	list_add_tail(&r_log->list, &restart_log_list);
+
+	list_for_each_entry_safe(r_log, temp, &restart_log_list, list) {
+
+		if ((curr_time->tv_sec - r_log->time.tv_sec) >
+				max_history_time_check) {
+
+			pr_debug("Deleted node with restart_time = %ld\n",
+					r_log->time.tv_sec);
+			list_del(&r_log->list);
+			kfree(r_log);
+			continue;
+		}
+		if (!n) {
+			time_first = &r_log->time;
+			pr_debug("Time_first: %ld\n", time_first->tv_sec);
+		}
+		n++;
+		pr_debug("Restart_time: %ld\n", r_log->time.tv_sec);
+	}
+
+	if (time_first && n >= max_restarts_check) {
+		if ((curr_time->tv_sec - time_first->tv_sec) <
+				max_history_time_check)
+			panic("Subsystems have crashed %d times in less than %ld seconds!",
+				max_restarts_check, max_history_time_check);
+	}
+
+out:
+	mutex_unlock(&restart_log_mutex);
+}
+
+static int is_ramdump_enabled(struct subsys_device *dev)
+{
+	if (dev->desc->ramdump_disable_gpio)
+		return !dev->desc->ramdump_disable;
+
+	return enable_ramdumps;
+}
+
+static void send_sysmon_notif(struct subsys_device *dev)
+{
+	struct subsys_device *subsys;
+
+	mutex_lock(&subsys_list_lock);
+	list_for_each_entry(subsys, &subsys_list, list)
+		if ((subsys->notif_state > 0) && (subsys != dev))
+			sysmon_send_event(dev->desc, subsys->desc,
+						subsys->notif_state);
+	mutex_unlock(&subsys_list_lock);
+}
+
+static void for_each_subsys_device(struct subsys_device **list,
+		unsigned int count, void *data,
+		void (*fn)(struct subsys_device *, void *))
+{
+	while (count--) {
+		struct subsys_device *dev = *list++;
+
+		if (!dev)
+			continue;
+		fn(dev, data);
+	}
+}
+
+static void notify_each_subsys_device(struct subsys_device **list,
+		unsigned int count,
+		enum subsys_notif_type notif, void *data)
+{
+	struct subsys_device *subsys;
+
+	while (count--) {
+		struct subsys_device *dev = *list++;
+		struct notif_data notif_data;
+		struct platform_device *pdev;
+
+		if (!dev)
+			continue;
+
+		pdev = container_of(dev->desc->dev, struct platform_device,
+									dev);
+		dev->notif_state = notif;
+
+		mutex_lock(&subsys_list_lock);
+		list_for_each_entry(subsys, &subsys_list, list)
+			if (dev != subsys &&
+				subsys->track.state == SUBSYS_ONLINE)
+				sysmon_send_event(subsys->desc, dev->desc,
+								notif);
+		mutex_unlock(&subsys_list_lock);
+
+		if (notif == SUBSYS_AFTER_POWERUP &&
+				dev->track.state == SUBSYS_ONLINE)
+			send_sysmon_notif(dev);
+
+		notif_data.crashed = subsys_get_crash_status(dev);
+		notif_data.enable_ramdump = is_ramdump_enabled(dev);
+		notif_data.enable_mini_ramdumps = enable_mini_ramdumps;
+		notif_data.no_auth = dev->desc->no_auth;
+		notif_data.pdev = pdev;
+
+		subsys_notif_queue_notification(dev->notify, notif,
+								&notif_data);
+	}
+}
+
+static void enable_all_irqs(struct subsys_device *dev)
+{
+	if (dev->desc->err_ready_irq)
+		enable_irq(dev->desc->err_ready_irq);
+	if (dev->desc->wdog_bite_irq && dev->desc->wdog_bite_handler) {
+		enable_irq(dev->desc->wdog_bite_irq);
+		irq_set_irq_wake(dev->desc->wdog_bite_irq, 1);
+	}
+	if (dev->desc->err_fatal_irq && dev->desc->err_fatal_handler)
+		enable_irq(dev->desc->err_fatal_irq);
+	if (dev->desc->stop_ack_irq && dev->desc->stop_ack_handler)
+		enable_irq(dev->desc->stop_ack_irq);
+	if (dev->desc->generic_irq && dev->desc->generic_handler) {
+		enable_irq(dev->desc->generic_irq);
+		irq_set_irq_wake(dev->desc->generic_irq, 1);
+	}
+}
+
+static void disable_all_irqs(struct subsys_device *dev)
+{
+	if (dev->desc->err_ready_irq)
+		disable_irq(dev->desc->err_ready_irq);
+	if (dev->desc->wdog_bite_irq && dev->desc->wdog_bite_handler) {
+		disable_irq(dev->desc->wdog_bite_irq);
+		irq_set_irq_wake(dev->desc->wdog_bite_irq, 0);
+	}
+	if (dev->desc->err_fatal_irq && dev->desc->err_fatal_handler)
+		disable_irq(dev->desc->err_fatal_irq);
+	if (dev->desc->stop_ack_irq && dev->desc->stop_ack_handler)
+		disable_irq(dev->desc->stop_ack_irq);
+	if (dev->desc->generic_irq && dev->desc->generic_handler) {
+		disable_irq(dev->desc->generic_irq);
+		irq_set_irq_wake(dev->desc->generic_irq, 0);
+	}
+}
+
+static int wait_for_err_ready(struct subsys_device *subsys)
+{
+	int ret;
+
+	/*
+	 * If the subsystem uses generic_irq, err_ready_irq will be 0; don't
+	 * skip waiting for error ready in that case.
+	 */
+	if ((subsys->desc->generic_irq <= 0 && !subsys->desc->err_ready_irq) ||
+				enable_debug == 1 || is_timeout_disabled())
+		return 0;
+
+	ret = wait_for_completion_timeout(&subsys->err_ready,
+					  msecs_to_jiffies(10000));
+	if (!ret) {
+		pr_err("[%s]: Error ready timed out\n", subsys->desc->name);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static void subsystem_shutdown(struct subsys_device *dev, void *data)
+{
+	const char *name = dev->desc->name;
+
+	pr_info("[%p]: Shutting down %s\n", current, name);
+	if (dev->desc->shutdown(dev->desc, true) < 0)
+		panic("subsys-restart: [%p]: Failed to shutdown %s!",
+			current, name);
+	dev->crash_count++;
+	subsys_set_state(dev, SUBSYS_OFFLINE);
+	disable_all_irqs(dev);
+}
+
+static void subsystem_ramdump(struct subsys_device *dev, void *data)
+{
+	const char *name = dev->desc->name;
+
+	if (dev->desc->ramdump)
+		if (dev->desc->ramdump(is_ramdump_enabled(dev), dev->desc) < 0)
+			pr_warn("%s[%p]: Ramdump failed.\n", name, current);
+	dev->do_ramdump_on_put = false;
+}
+
+static void subsystem_free_memory(struct subsys_device *dev, void *data)
+{
+	if (dev->desc->free_memory)
+		dev->desc->free_memory(dev->desc);
+}
+
+static void subsystem_powerup(struct subsys_device *dev, void *data)
+{
+	const char *name = dev->desc->name;
+	int ret;
+
+	pr_info("[%p]: Powering up %s\n", current, name);
+	init_completion(&dev->err_ready);
+
+	if (dev->desc->powerup(dev->desc) < 0) {
+		notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
+								NULL);
+		panic("[%p]: Powerup error: %s!", current, name);
+	}
+	enable_all_irqs(dev);
+
+	ret = wait_for_err_ready(dev);
+	if (ret) {
+		notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
+								NULL);
+		panic("[%p]: Timed out waiting for error ready: %s!",
+			current, name);
+	}
+	subsys_set_state(dev, SUBSYS_ONLINE);
+	subsys_set_crash_status(dev, false);
+}
+
+static int __find_subsys(struct device *dev, void *data)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+
+	return !strcmp(subsys->desc->name, data);
+}
+
+static struct subsys_device *find_subsys(const char *str)
+{
+	struct device *dev;
+
+	if (!str)
+		return NULL;
+
+	dev = bus_find_device(&subsys_bus_type, NULL, (void *)str,
+			__find_subsys);
+	return dev ? to_subsys(dev) : NULL;
+}
+
+static int subsys_start(struct subsys_device *subsys)
+{
+	int ret;
+
+	notify_each_subsys_device(&subsys, 1, SUBSYS_BEFORE_POWERUP,
+								NULL);
+
+	init_completion(&subsys->err_ready);
+	ret = subsys->desc->powerup(subsys->desc);
+	if (ret) {
+		notify_each_subsys_device(&subsys, 1, SUBSYS_POWERUP_FAILURE,
+									NULL);
+		return ret;
+	}
+	enable_all_irqs(subsys);
+
+	if (subsys->desc->is_not_loadable) {
+		subsys_set_state(subsys, SUBSYS_ONLINE);
+		return 0;
+	}
+
+	ret = wait_for_err_ready(subsys);
+	if (ret) {
+		/*
+		 * pil-boot succeeded but we need to shutdown
+		 * the device because error ready timed out.
+		 */
+		notify_each_subsys_device(&subsys, 1, SUBSYS_POWERUP_FAILURE,
+									NULL);
+		subsys->desc->shutdown(subsys->desc, false);
+		disable_all_irqs(subsys);
+		return ret;
+	}
+	subsys_set_state(subsys, SUBSYS_ONLINE);
+
+	notify_each_subsys_device(&subsys, 1, SUBSYS_AFTER_POWERUP,
+								NULL);
+	return ret;
+}
+
+static void subsys_stop(struct subsys_device *subsys)
+{
+	const char *name = subsys->desc->name;
+
+	notify_each_subsys_device(&subsys, 1, SUBSYS_BEFORE_SHUTDOWN, NULL);
+	if (!of_property_read_bool(subsys->desc->dev->of_node,
+					"qcom,pil-force-shutdown")) {
+		subsys_set_state(subsys, SUBSYS_OFFLINING);
+		subsys->desc->sysmon_shutdown_ret =
+				sysmon_send_shutdown(subsys->desc);
+		if (subsys->desc->sysmon_shutdown_ret)
+			pr_debug("Graceful shutdown failed for %s\n", name);
+	}
+
+	subsys->desc->shutdown(subsys->desc, false);
+	subsys_set_state(subsys, SUBSYS_OFFLINE);
+	disable_all_irqs(subsys);
+	notify_each_subsys_device(&subsys, 1, SUBSYS_AFTER_SHUTDOWN, NULL);
+}
+
+int subsystem_set_fwname(const char *name, const char *fw_name)
+{
+	struct subsys_device *subsys;
+
+	if (!name)
+		return -EINVAL;
+
+	if (!fw_name)
+		return -EINVAL;
+
+	subsys = find_subsys(name);
+	if (!subsys)
+		return -EINVAL;
+
+	pr_debug("Changing subsys [%s] fw_name to [%s]\n", name, fw_name);
+	strlcpy(subsys->desc->fw_name, fw_name,
+		sizeof(subsys->desc->fw_name));
+
+	return 0;
+}
+EXPORT_SYMBOL(subsystem_set_fwname);
+
+int wait_for_shutdown_ack(struct subsys_desc *desc)
+{
+	int count;
+	struct subsys_device *dev;
+
+	if (!desc || !desc->shutdown_ack_gpio)
+		return 0;
+
+	dev = find_subsys(desc->name);
+	if (!dev)
+		return 0;
+
+	for (count = SHUTDOWN_ACK_MAX_LOOPS; count > 0; count--) {
+		if (gpio_get_value(desc->shutdown_ack_gpio))
+			return count;
+		else if (subsys_get_crash_status(dev))
+			break;
+		msleep(SHUTDOWN_ACK_DELAY_MS);
+	}
+
+	pr_err("[%s]: Timed out waiting for shutdown ack\n", desc->name);
+	return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(wait_for_shutdown_ack);
+
+void *__subsystem_get(const char *name, const char *fw_name)
+{
+	struct subsys_device *subsys;
+	struct subsys_device *subsys_d;
+	int ret;
+	void *retval;
+	struct subsys_tracking *track;
+
+	if (!name)
+		return NULL;
+
+	subsys = retval = find_subsys(name);
+	if (!subsys)
+		return ERR_PTR(-ENODEV);
+	if (!try_module_get(subsys->owner)) {
+		retval = ERR_PTR(-ENODEV);
+		goto err_module;
+	}
+
+	subsys_d = subsystem_get(subsys->desc->depends_on);
+	if (IS_ERR(subsys_d)) {
+		retval = subsys_d;
+		goto err_depends;
+	}
+
+	track = subsys_get_track(subsys);
+	mutex_lock(&track->lock);
+	if (!subsys->count) {
+		if (fw_name) {
+			pr_info("Changing subsys fw_name to %s\n", fw_name);
+			strlcpy(subsys->desc->fw_name, fw_name,
+				sizeof(subsys->desc->fw_name));
+		}
+		ret = subsys_start(subsys);
+		if (ret) {
+			retval = ERR_PTR(ret);
+			goto err_start;
+		}
+	}
+	subsys->count++;
+	mutex_unlock(&track->lock);
+	return retval;
+err_start:
+	mutex_unlock(&track->lock);
+	subsystem_put(subsys_d);
+err_depends:
+	module_put(subsys->owner);
+err_module:
+	put_device(&subsys->dev);
+	return retval;
+}
+
+/**
+ * subsystem_get() - Boot a subsystem
+ * @name: pointer to a string containing the name of the subsystem to boot
+ *
+ * This function returns a pointer if it succeeds. If an error occurs an
+ * ERR_PTR is returned.
+ *
+ * If this feature is disabled, the value %NULL will be returned.
+ */
+void *subsystem_get(const char *name)
+{
+	return __subsystem_get(name, NULL);
+}
+EXPORT_SYMBOL(subsystem_get);
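+
+/*
+ * Minimal usage sketch ("modem" is a hypothetical subsystem name):
+ *
+ *	void *handle = subsystem_get("modem");
+ *	if (IS_ERR(handle))
+ *		return PTR_ERR(handle);
+ *	...
+ *	subsystem_put(handle);
+ */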
+
+/**
+ * subsystem_get_with_fwname() - Boot a subsystem using the firmware name passed
+ * @name: pointer to a string containing the name of the subsystem to boot
+ * @fw_name: pointer to a string containing the subsystem firmware image name
+ *
+ * This function returns a pointer if it succeeds. If an error occurs an
+ * ERR_PTR is returned.
+ *
+ * If this feature is disabled, the value %NULL will be returned.
+ */
+void *subsystem_get_with_fwname(const char *name, const char *fw_name)
+{
+	return __subsystem_get(name, fw_name);
+}
+EXPORT_SYMBOL(subsystem_get_with_fwname);
+
+/**
+ * subsystem_put() - Shutdown a subsystem
+ * @subsystem: pointer from a previous call to subsystem_get()
+ *
+ * The subsystem is not actually shut down until all callers of
+ * subsystem_get() have called subsystem_put().
+ */
+void subsystem_put(void *subsystem)
+{
+	struct subsys_device *subsys_d, *subsys = subsystem;
+	struct subsys_tracking *track;
+
+	if (IS_ERR_OR_NULL(subsys))
+		return;
+
+	track = subsys_get_track(subsys);
+	mutex_lock(&track->lock);
+	if (WARN(!subsys->count, "%s: %s: Reference count mismatch\n",
+			subsys->desc->name, __func__))
+		goto err_out;
+	if (!--subsys->count) {
+		subsys_stop(subsys);
+		if (subsys->do_ramdump_on_put)
+			subsystem_ramdump(subsys, NULL);
+		subsystem_free_memory(subsys, NULL);
+	}
+	mutex_unlock(&track->lock);
+
+	subsys_d = find_subsys(subsys->desc->depends_on);
+	if (subsys_d) {
+		subsystem_put(subsys_d);
+		put_device(&subsys_d->dev);
+	}
+	module_put(subsys->owner);
+	put_device(&subsys->dev);
+	return;
+err_out:
+	mutex_unlock(&track->lock);
+}
+EXPORT_SYMBOL(subsystem_put);
+
+static void subsystem_restart_wq_func(struct work_struct *work)
+{
+	struct subsys_device *dev = container_of(work,
+						struct subsys_device, work);
+	struct subsys_device **list;
+	struct subsys_desc *desc = dev->desc;
+	struct subsys_soc_restart_order *order = dev->restart_order;
+	struct subsys_tracking *track;
+	unsigned int count;
+	unsigned long flags;
+
+	/*
+	 * It's OK to not take the registration lock at this point.
+	 * This is because the subsystem list inside the relevant
+	 * restart order is not being traversed.
+	 */
+	if (order) {
+		list = order->subsys_ptrs;
+		count = order->count;
+		track = &order->track;
+	} else {
+		list = &dev;
+		count = 1;
+		track = &dev->track;
+	}
+
+	/*
+	 * If a system reboot/shutdown is under way, ignore subsystem errors.
+	 * However, print a message so that we know that a subsystem behaved
+	 * unexpectedly here.
+	 */
+	if (system_state == SYSTEM_RESTART
+		|| system_state == SYSTEM_POWER_OFF) {
+		WARN(1, "SSR aborted: %s, system reboot/shutdown is under way\n",
+			desc->name);
+		return;
+	}
+
+	mutex_lock(&track->lock);
+	do_epoch_check(dev);
+
+	if (dev->track.state == SUBSYS_OFFLINE) {
+		mutex_unlock(&track->lock);
+		WARN(1, "SSR aborted: %s subsystem not online\n", desc->name);
+		return;
+	}
+
+	/*
+	 * It's necessary to take the registration lock because the subsystem
+	 * list in the SoC restart order will be traversed and it shouldn't be
+	 * changed until _this_ restart sequence completes.
+	 */
+	mutex_lock(&soc_order_reg_lock);
+
+	pr_debug("[%p]: Starting restart sequence for %s\n", current,
+			desc->name);
+	notify_each_subsys_device(list, count, SUBSYS_BEFORE_SHUTDOWN, NULL);
+	for_each_subsys_device(list, count, NULL, subsystem_shutdown);
+	notify_each_subsys_device(list, count, SUBSYS_AFTER_SHUTDOWN, NULL);
+
+	notify_each_subsys_device(list, count, SUBSYS_RAMDUMP_NOTIFICATION,
+									NULL);
+
+	spin_lock_irqsave(&track->s_lock, flags);
+	track->p_state = SUBSYS_RESTARTING;
+	spin_unlock_irqrestore(&track->s_lock, flags);
+
+	/* Collect ram dumps for all subsystems in order here */
+	for_each_subsys_device(list, count, NULL, subsystem_ramdump);
+
+	for_each_subsys_device(list, count, NULL, subsystem_free_memory);
+
+	notify_each_subsys_device(list, count, SUBSYS_BEFORE_POWERUP, NULL);
+	for_each_subsys_device(list, count, NULL, subsystem_powerup);
+	notify_each_subsys_device(list, count, SUBSYS_AFTER_POWERUP, NULL);
+
+	pr_info("[%p]: Restart sequence for %s completed.\n",
+			current, desc->name);
+
+	mutex_unlock(&soc_order_reg_lock);
+	mutex_unlock(&track->lock);
+
+	spin_lock_irqsave(&track->s_lock, flags);
+	track->p_state = SUBSYS_NORMAL;
+	__pm_relax(&dev->ssr_wlock);
+	spin_unlock_irqrestore(&track->s_lock, flags);
+}
+
+static void __subsystem_restart_dev(struct subsys_device *dev)
+{
+	struct subsys_desc *desc = dev->desc;
+	const char *name = dev->desc->name;
+	struct subsys_tracking *track;
+	unsigned long flags;
+
+	pr_debug("Restarting %s [level=%s]!\n", desc->name,
+			restart_levels[dev->restart_level]);
+
+	track = subsys_get_track(dev);
+	/*
+	 * Allow drivers to call subsystem_restart{_dev}() as many times as
+	 * they want up until the point where the subsystem is shutdown.
+	 */
+	spin_lock_irqsave(&track->s_lock, flags);
+	if (track->p_state != SUBSYS_CRASHED &&
+					dev->track.state == SUBSYS_ONLINE) {
+		if (track->p_state != SUBSYS_RESTARTING) {
+			track->p_state = SUBSYS_CRASHED;
+			__pm_stay_awake(&dev->ssr_wlock);
+			queue_work(ssr_wq, &dev->work);
+		} else {
+			panic("Subsystem %s crashed during SSR!", name);
+		}
+	} else
+		WARN(dev->track.state == SUBSYS_OFFLINE,
+			"SSR aborted: %s subsystem not online\n", name);
+	spin_unlock_irqrestore(&track->s_lock, flags);
+}
+
+static void device_restart_work_hdlr(struct work_struct *work)
+{
+	struct subsys_device *dev = container_of(work, struct subsys_device,
+							device_restart_work);
+
+	notify_each_subsys_device(&dev, 1, SUBSYS_SOC_RESET, NULL);
+	/*
+	 * Temporary workaround until ramdump userspace application calls
+	 * sync() and fclose() on attempting the dump.
+	 */
+	msleep(100);
+	panic("subsys-restart: Resetting the SoC - %s crashed.",
+							dev->desc->name);
+}
+
+int subsystem_restart_dev(struct subsys_device *dev)
+{
+	const char *name;
+
+	if (!get_device(&dev->dev))
+		return -ENODEV;
+
+	if (!try_module_get(dev->owner)) {
+		put_device(&dev->dev);
+		return -ENODEV;
+	}
+
+	name = dev->desc->name;
+
+	/*
+	 * If a system reboot/shutdown is underway, ignore subsystem errors.
+	 * However, print a message so that we know that a subsystem behaved
+	 * unexpectedly here.
+	 */
+	if (system_state == SYSTEM_RESTART
+		|| system_state == SYSTEM_POWER_OFF) {
+		pr_err("%s crashed during a system poweroff/shutdown.\n", name);
+		module_put(dev->owner);
+		put_device(&dev->dev);
+		return -EBUSY;
+	}
+
+	pr_info("Restart sequence requested for %s, restart_level = %s.\n",
+		name, restart_levels[dev->restart_level]);
+
+	if (WARN(disable_restart_work == DISABLE_SSR,
+		"subsys-restart: Ignoring restart request for %s.\n", name)) {
+		module_put(dev->owner);
+		put_device(&dev->dev);
+		return 0;
+	}
+
+	switch (dev->restart_level) {
+
+	case RESET_SUBSYS_COUPLED:
+		__subsystem_restart_dev(dev);
+		break;
+	case RESET_SOC:
+		__pm_stay_awake(&dev->ssr_wlock);
+		schedule_work(&dev->device_restart_work);
+		return 0;
+	default:
+		panic("subsys-restart: Unknown restart level!\n");
+		break;
+	}
+	module_put(dev->owner);
+	put_device(&dev->dev);
+
+	return 0;
+}
+EXPORT_SYMBOL(subsystem_restart_dev);
+
+int subsystem_restart(const char *name)
+{
+	int ret;
+	struct subsys_device *dev = find_subsys(name);
+
+	if (!dev)
+		return -ENODEV;
+
+	ret = subsystem_restart_dev(dev);
+	put_device(&dev->dev);
+	return ret;
+}
+EXPORT_SYMBOL(subsystem_restart);
+
+int subsystem_crashed(const char *name)
+{
+	struct subsys_device *dev = find_subsys(name);
+	struct subsys_tracking *track;
+
+	if (!dev)
+		return -ENODEV;
+
+	if (!get_device(&dev->dev))
+		return -ENODEV;
+
+	track = subsys_get_track(dev);
+
+	mutex_lock(&track->lock);
+	dev->do_ramdump_on_put = true;
+	/*
+	 * TODO: Make this work with multiple consumers where one is calling
+	 * subsystem_restart() and another is calling this function. To do
+	 * so would require updating private state, etc.
+	 */
+	mutex_unlock(&track->lock);
+
+	put_device(&dev->dev);
+	return 0;
+}
+EXPORT_SYMBOL(subsystem_crashed);
+
+void subsys_set_crash_status(struct subsys_device *dev, bool crashed)
+{
+	dev->crashed = crashed;
+}
+
+bool subsys_get_crash_status(struct subsys_device *dev)
+{
+	return dev->crashed;
+}
+
+static struct subsys_device *desc_to_subsys(struct device *d)
+{
+	struct subsys_device *device, *subsys_dev = NULL;
+
+	mutex_lock(&subsys_list_lock);
+	list_for_each_entry(device, &subsys_list, list)
+		if (device->desc->dev == d)
+			subsys_dev = device;
+	mutex_unlock(&subsys_list_lock);
+	return subsys_dev;
+}
+
+void notify_proxy_vote(struct device *device)
+{
+	struct subsys_device *dev = desc_to_subsys(device);
+
+	if (dev)
+		notify_each_subsys_device(&dev, 1, SUBSYS_PROXY_VOTE, NULL);
+}
+
+void notify_proxy_unvote(struct device *device)
+{
+	struct subsys_device *dev = desc_to_subsys(device);
+
+	if (dev)
+		notify_each_subsys_device(&dev, 1, SUBSYS_PROXY_UNVOTE, NULL);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static ssize_t subsys_debugfs_read(struct file *filp, char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	int r;
+	char buf[40];
+	struct subsys_device *subsys = filp->private_data;
+
+	r = snprintf(buf, sizeof(buf), "%d\n", subsys->count);
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t subsys_debugfs_write(struct file *filp,
+		const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	struct subsys_device *subsys = filp->private_data;
+	char buf[10];
+	char *cmp;
+
+	cnt = min(cnt, sizeof(buf) - 1);
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+	buf[cnt] = '\0';
+	cmp = strstrip(buf);
+
+	if (!strcmp(cmp, "restart")) {
+		if (subsystem_restart_dev(subsys))
+			return -EIO;
+	} else if (!strcmp(cmp, "get")) {
+		if (subsystem_get(subsys->desc->name))
+			return -EIO;
+	} else if (!strcmp(cmp, "put")) {
+		subsystem_put(subsys);
+	} else {
+		return -EINVAL;
+	}
+
+	return cnt;
+}
+
+static const struct file_operations subsys_debugfs_fops = {
+	.open	= simple_open,
+	.read	= subsys_debugfs_read,
+	.write	= subsys_debugfs_write,
+};
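+
+/*
+ * Each subsystem gets a debugfs node that accepts "restart", "get" and
+ * "put" and reports the current reference count on read, e.g. (assuming
+ * debugfs is mounted at /sys/kernel/debug):
+ *	echo restart > /sys/kernel/debug/msm_subsys/<name>
+ */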
+
+static struct dentry *subsys_base_dir;
+
+static int __init subsys_debugfs_init(void)
+{
+	subsys_base_dir = debugfs_create_dir("msm_subsys", NULL);
+	return !subsys_base_dir ? -ENOMEM : 0;
+}
+
+static void subsys_debugfs_exit(void)
+{
+	debugfs_remove_recursive(subsys_base_dir);
+}
+
+static int subsys_debugfs_add(struct subsys_device *subsys)
+{
+	if (!subsys_base_dir)
+		return -ENOMEM;
+
+	subsys->dentry = debugfs_create_file(subsys->desc->name,
+				0644, subsys_base_dir,
+				subsys, &subsys_debugfs_fops);
+	return !subsys->dentry ? -ENOMEM : 0;
+}
+
+static void subsys_debugfs_remove(struct subsys_device *subsys)
+{
+	debugfs_remove(subsys->dentry);
+}
+#else
+static int __init subsys_debugfs_init(void) { return 0; }
+static void subsys_debugfs_exit(void) { }
+static int subsys_debugfs_add(struct subsys_device *subsys) { return 0; }
+static void subsys_debugfs_remove(struct subsys_device *subsys) { }
+#endif
+
+static int subsys_device_open(struct inode *inode, struct file *file)
+{
+	struct subsys_device *device, *subsys_dev = NULL;
+	void *retval;
+
+	mutex_lock(&subsys_list_lock);
+	list_for_each_entry(device, &subsys_list, list)
+		if (MINOR(device->dev_no) == iminor(inode))
+			subsys_dev = device;
+	mutex_unlock(&subsys_list_lock);
+
+	if (!subsys_dev)
+		return -EINVAL;
+
+	retval = subsystem_get_with_fwname(subsys_dev->desc->name,
+					subsys_dev->desc->fw_name);
+	if (IS_ERR(retval))
+		return PTR_ERR(retval);
+
+	return 0;
+}
+
+static int subsys_device_close(struct inode *inode, struct file *file)
+{
+	struct subsys_device *device, *subsys_dev = NULL;
+
+	mutex_lock(&subsys_list_lock);
+	list_for_each_entry(device, &subsys_list, list)
+		if (MINOR(device->dev_no) == iminor(inode))
+			subsys_dev = device;
+	mutex_unlock(&subsys_list_lock);
+
+	if (!subsys_dev)
+		return -EINVAL;
+
+	subsystem_put(subsys_dev);
+	return 0;
+}
+
+static const struct file_operations subsys_device_fops = {
+		.owner = THIS_MODULE,
+		.open = subsys_device_open,
+		.release = subsys_device_close,
+};
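+
+/*
+ * Opening the character device boots the subsystem and closing it drops
+ * the reference again; e.g. from userspace (the /dev path assumes a
+ * standard udev setup, and "modem" is a hypothetical name):
+ *	fd = open("/dev/subsys_modem", O_RDONLY);	boots "modem"
+ *	close(fd);					allows shutdown
+ */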
+
+static void subsys_device_release(struct device *dev)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+
+	wakeup_source_trash(&subsys->ssr_wlock);
+	mutex_destroy(&subsys->track.lock);
+	ida_simple_remove(&subsys_ida, subsys->id);
+	kfree(subsys);
+}
+static irqreturn_t subsys_err_ready_intr_handler(int irq, void *subsys)
+{
+	struct subsys_device *subsys_dev = subsys;
+
+	dev_info(subsys_dev->desc->dev,
+		"Subsystem error monitoring/handling services are up\n");
+
+	if (subsys_dev->desc->is_not_loadable)
+		return IRQ_HANDLED;
+
+	complete(&subsys_dev->err_ready);
+	return IRQ_HANDLED;
+}
+
+static int subsys_char_device_add(struct subsys_device *subsys_dev)
+{
+	int ret = 0;
+	static int major, minor;
+	dev_t dev_no;
+
+	mutex_lock(&char_device_lock);
+	if (!major) {
+		ret = alloc_chrdev_region(&dev_no, 0, 4, "subsys");
+		if (ret < 0) {
+			pr_err("Failed to alloc subsys_dev region, err %d\n",
+									ret);
+			goto fail;
+		}
+		major = MAJOR(dev_no);
+		minor = MINOR(dev_no);
+	} else
+		dev_no = MKDEV(major, minor);
+
+	if (IS_ERR(device_create(char_class, subsys_dev->desc->dev, dev_no,
+			NULL, "subsys_%s", subsys_dev->desc->name))) {
+		pr_err("Failed to create subsys_%s device\n",
+						subsys_dev->desc->name);
+		ret = -ENODEV;
+		goto fail_unregister_cdev_region;
+	}
+
+	cdev_init(&subsys_dev->char_dev, &subsys_device_fops);
+	subsys_dev->char_dev.owner = THIS_MODULE;
+	ret = cdev_add(&subsys_dev->char_dev, dev_no, 1);
+	if (ret < 0)
+		goto fail_destroy_device;
+
+	subsys_dev->dev_no = dev_no;
+	minor++;
+	mutex_unlock(&char_device_lock);
+
+	return 0;
+
+fail_destroy_device:
+	device_destroy(char_class, dev_no);
+fail_unregister_cdev_region:
+	unregister_chrdev_region(dev_no, 1);
+fail:
+	mutex_unlock(&char_device_lock);
+	return ret;
+}
+
+static void subsys_char_device_remove(struct subsys_device *subsys_dev)
+{
+	cdev_del(&subsys_dev->char_dev);
+	device_destroy(char_class, subsys_dev->dev_no);
+	unregister_chrdev_region(subsys_dev->dev_no, 1);
+}
+
+static void subsys_remove_restart_order(struct device_node *device)
+{
+	struct subsys_soc_restart_order *order;
+	int i;
+
+	mutex_lock(&ssr_order_mutex);
+	list_for_each_entry(order, &ssr_order_list, list)
+		for (i = 0; i < order->count; i++)
+			if (order->device_ptrs[i] == device)
+				order->subsys_ptrs[i] = NULL;
+	mutex_unlock(&ssr_order_mutex);
+}
+
+static struct subsys_soc_restart_order *ssr_parse_restart_orders(
+						struct subsys_desc *desc)
+{
+	int i, j, count, num = 0;
+	struct subsys_soc_restart_order *order, *tmp;
+	struct device *dev = desc->dev;
+	struct device_node *ssr_node;
+	uint32_t len;
+
+	if (!of_get_property(dev->of_node, "qcom,restart-group", &len))
+		return NULL;
+
+	count = len/sizeof(uint32_t);
+
+	order = devm_kzalloc(dev, sizeof(*order), GFP_KERNEL);
+	if (!order)
+		return ERR_PTR(-ENOMEM);
+
+	order->subsys_ptrs = devm_kzalloc(dev,
+				count * sizeof(struct subsys_device *),
+				GFP_KERNEL);
+	if (!order->subsys_ptrs)
+		return ERR_PTR(-ENOMEM);
+
+	order->device_ptrs = devm_kzalloc(dev,
+				count * sizeof(struct device_node *),
+				GFP_KERNEL);
+	if (!order->device_ptrs)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < count; i++) {
+		ssr_node = of_parse_phandle(dev->of_node,
+						"qcom,restart-group", i);
+		if (!ssr_node)
+			return ERR_PTR(-ENXIO);
+		pr_info("%s device has been added to %s's restart group\n",
+						ssr_node->name, desc->name);
+		order->device_ptrs[i] = ssr_node;
+		of_node_put(ssr_node);
+	}
+
+	/*
+	 * Check for similar restart groups. If found, return
+	 * without adding the new group to the ssr_order_list.
+	 */
+	mutex_lock(&ssr_order_mutex);
+	list_for_each_entry(tmp, &ssr_order_list, list) {
+		for (i = 0; i < count; i++) {
+			for (j = 0; j < count; j++) {
+				if (order->device_ptrs[j] !=
+					tmp->device_ptrs[i])
+					continue;
+				else
+					num++;
+			}
+		}
+
+		if (num == count && tmp->count == count)
+			goto err;
+		else if (num) {
+			tmp = ERR_PTR(-EINVAL);
+			goto err;
+		}
+	}
+
+	order->count = count;
+	mutex_init(&order->track.lock);
+	spin_lock_init(&order->track.s_lock);
+
+	INIT_LIST_HEAD(&order->list);
+	list_add_tail(&order->list, &ssr_order_list);
+	mutex_unlock(&ssr_order_mutex);
+
+	return order;
+err:
+	mutex_unlock(&ssr_order_mutex);
+	return tmp;
+}
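+
+/*
+ * A restart group is declared in devicetree as a list of phandles, e.g.
+ * (node names hypothetical):
+ *	qcom,restart-group = <&modem_pil &adsp_pil>;
+ * All members of a group are shut down and powered back up together.
+ */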
+
+static int __get_gpio(struct subsys_desc *desc, const char *prop,
+		int *gpio)
+{
+	struct device_node *dnode = desc->dev->of_node;
+	int ret = -ENOENT;
+
+	if (of_find_property(dnode, prop, NULL)) {
+		*gpio = of_get_named_gpio(dnode, prop, 0);
+		ret = *gpio < 0 ? *gpio : 0;
+	}
+
+	return ret;
+}
+
+static int __get_irq(struct subsys_desc *desc, const char *prop,
+		unsigned int *irq, int *gpio)
+{
+	int ret, gpiol, irql;
+
+	ret = __get_gpio(desc, prop, &gpiol);
+	if (ret)
+		return ret;
+
+	irql = gpio_to_irq(gpiol);
+
+	if (irql == -ENOENT)
+		irql = -ENXIO;
+
+	if (irql < 0) {
+		pr_err("[%s]: Error getting IRQ \"%s\"\n", desc->name,
+				prop);
+		return irql;
+	}
+
+	if (gpio)
+		*gpio = gpiol;
+	*irq = irql;
+
+	return 0;
+}
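+
+/*
+ * Example bindings consumed by the helpers above (controller and cell
+ * values hypothetical):
+ *	qcom,gpio-err-fatal = <&smp2p_modem 0 0>;
+ *	qcom,gpio-err-ready = <&smp2p_modem 1 0>;
+ * __get_gpio() resolves the named gpio; __get_irq() additionally maps it
+ * to an interrupt via gpio_to_irq().
+ */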
+
+static int subsys_parse_devicetree(struct subsys_desc *desc)
+{
+	struct subsys_soc_restart_order *order;
+	int ret;
+
+	struct platform_device *pdev = container_of(desc->dev,
+					struct platform_device, dev);
+
+	ret = __get_irq(desc, "qcom,gpio-err-fatal", &desc->err_fatal_irq,
+							&desc->err_fatal_gpio);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	ret = __get_irq(desc, "qcom,gpio-err-ready", &desc->err_ready_irq,
+							NULL);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	ret = __get_irq(desc, "qcom,gpio-stop-ack", &desc->stop_ack_irq, NULL);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	ret = __get_gpio(desc, "qcom,gpio-force-stop", &desc->force_stop_gpio);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	ret = __get_gpio(desc, "qcom,gpio-ramdump-disable",
+			&desc->ramdump_disable_gpio);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	ret = __get_gpio(desc, "qcom,gpio-shutdown-ack",
+			&desc->shutdown_ack_gpio);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	ret = platform_get_irq(pdev, 0);
+	if (ret > 0)
+		desc->wdog_bite_irq = ret;
+
+	if (of_property_read_bool(pdev->dev.of_node,
+					"qcom,pil-generic-irq-handler")) {
+		ret = platform_get_irq(pdev, 0);
+		if (ret > 0)
+			desc->generic_irq = ret;
+	}
+
+	order = ssr_parse_restart_orders(desc);
+	if (IS_ERR(order)) {
+		pr_err("Could not initialize SSR restart order, err = %ld\n",
+							PTR_ERR(order));
+		return PTR_ERR(order);
+	}
+
+	return 0;
+}
+
+static int subsys_setup_irqs(struct subsys_device *subsys)
+{
+	struct subsys_desc *desc = subsys->desc;
+	int ret;
+
+	if (desc->err_fatal_irq && desc->err_fatal_handler) {
+		ret = devm_request_irq(desc->dev, desc->err_fatal_irq,
+				desc->err_fatal_handler,
+				IRQF_TRIGGER_RISING, desc->name, desc);
+		if (ret < 0) {
+			dev_err(desc->dev, "[%s]: Unable to register error fatal IRQ handler!: %d\n",
+				desc->name, ret);
+			return ret;
+		}
+		disable_irq(desc->err_fatal_irq);
+	}
+
+	if (desc->stop_ack_irq && desc->stop_ack_handler) {
+		ret = devm_request_irq(desc->dev, desc->stop_ack_irq,
+			desc->stop_ack_handler,
+			IRQF_TRIGGER_RISING, desc->name, desc);
+		if (ret < 0) {
+			dev_err(desc->dev, "[%s]: Unable to register stop ack handler!: %d\n",
+				desc->name, ret);
+			return ret;
+		}
+		disable_irq(desc->stop_ack_irq);
+	}
+
+	if (desc->wdog_bite_irq && desc->wdog_bite_handler) {
+		ret = devm_request_irq(desc->dev, desc->wdog_bite_irq,
+			desc->wdog_bite_handler,
+			IRQF_TRIGGER_RISING, desc->name, desc);
+		if (ret < 0) {
+			dev_err(desc->dev, "[%s]: Unable to register wdog bite handler!: %d\n",
+				desc->name, ret);
+			return ret;
+		}
+		disable_irq(desc->wdog_bite_irq);
+	}
+
+	if (desc->generic_irq && desc->generic_handler) {
+		ret = devm_request_irq(desc->dev, desc->generic_irq,
+			desc->generic_handler,
+			IRQF_TRIGGER_HIGH, desc->name, desc);
+		if (ret < 0) {
+			dev_err(desc->dev, "[%s]: Unable to register generic irq handler!: %d\n",
+				desc->name, ret);
+			return ret;
+		}
+		disable_irq(desc->generic_irq);
+	}
+
+	if (desc->err_ready_irq) {
+		ret = devm_request_irq(desc->dev,
+					desc->err_ready_irq,
+					subsys_err_ready_intr_handler,
+					IRQF_TRIGGER_RISING,
+					"error_ready_interrupt", subsys);
+		if (ret < 0) {
+			dev_err(desc->dev,
+				"[%s]: Unable to register err ready handler\n",
+				desc->name);
+			return ret;
+		}
+		disable_irq(desc->err_ready_irq);
+	}
+
+	return 0;
+}
+
+static void subsys_free_irqs(struct subsys_device *subsys)
+{
+	struct subsys_desc *desc = subsys->desc;
+
+	if (desc->err_fatal_irq && desc->err_fatal_handler)
+		devm_free_irq(desc->dev, desc->err_fatal_irq, desc);
+	if (desc->stop_ack_irq && desc->stop_ack_handler)
+		devm_free_irq(desc->dev, desc->stop_ack_irq, desc);
+	if (desc->wdog_bite_irq && desc->wdog_bite_handler)
+		devm_free_irq(desc->dev, desc->wdog_bite_irq, desc);
+	if (desc->err_ready_irq)
+		devm_free_irq(desc->dev, desc->err_ready_irq, subsys);
+}
+
+struct subsys_device *subsys_register(struct subsys_desc *desc)
+{
+	struct subsys_device *subsys;
+	struct device_node *ofnode = desc->dev->of_node;
+	int ret;
+
+	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
+	if (!subsys)
+		return ERR_PTR(-ENOMEM);
+
+	subsys->desc = desc;
+	subsys->owner = desc->owner;
+	subsys->dev.parent = desc->dev;
+	subsys->dev.bus = &subsys_bus_type;
+	subsys->dev.release = subsys_device_release;
+	subsys->notif_state = -1;
+	subsys->desc->sysmon_pid = -1;
+	strlcpy(subsys->desc->fw_name, desc->name,
+			sizeof(subsys->desc->fw_name));
+
+	subsys->notify = subsys_notif_add_subsys(desc->name);
+
+	snprintf(subsys->wlname, sizeof(subsys->wlname), "ssr(%s)", desc->name);
+	wakeup_source_init(&subsys->ssr_wlock, subsys->wlname);
+	INIT_WORK(&subsys->work, subsystem_restart_wq_func);
+	INIT_WORK(&subsys->device_restart_work, device_restart_work_hdlr);
+	spin_lock_init(&subsys->track.s_lock);
+
+	subsys->id = ida_simple_get(&subsys_ida, 0, 0, GFP_KERNEL);
+	if (subsys->id < 0) {
+		wakeup_source_trash(&subsys->ssr_wlock);
+		ret = subsys->id;
+		kfree(subsys);
+		return ERR_PTR(ret);
+	}
+
+	dev_set_name(&subsys->dev, "subsys%d", subsys->id);
+
+	mutex_init(&subsys->track.lock);
+
+	ret = subsys_debugfs_add(subsys);
+	if (ret) {
+		ida_simple_remove(&subsys_ida, subsys->id);
+		wakeup_source_trash(&subsys->ssr_wlock);
+		kfree(subsys);
+		return ERR_PTR(ret);
+	}
+
+	ret = device_register(&subsys->dev);
+	if (ret) {
+		subsys_debugfs_remove(subsys);
+		put_device(&subsys->dev);
+		kfree(subsys);
+		return ERR_PTR(ret);
+	}
+
+	ret = subsys_char_device_add(subsys);
+	if (ret)
+		goto err_register;
+
+	if (ofnode) {
+		ret = subsys_parse_devicetree(desc);
+		if (ret)
+			goto err_register;
+
+		subsys->restart_order = update_restart_order(subsys);
+
+		ret = subsys_setup_irqs(subsys);
+		if (ret < 0)
+			goto err_setup_irqs;
+
+		if (of_property_read_u32(ofnode, "qcom,ssctl-instance-id",
+					&desc->ssctl_instance_id))
+			pr_debug("Reading instance-id for %s failed\n",
+								desc->name);
+
+		if (of_property_read_u32(ofnode, "qcom,sysmon-id",
+					&subsys->desc->sysmon_pid))
+			pr_debug("Reading sysmon-id for %s failed\n",
+								desc->name);
+
+		subsys->desc->edge = of_get_property(ofnode, "qcom,edge",
+									NULL);
+		if (!subsys->desc->edge)
+			pr_debug("Reading qcom,edge for %s failed\n",
+								desc->name);
+	}
+
+	ret = sysmon_notifier_register(desc);
+	if (ret < 0)
+		goto err_sysmon_notifier;
+
+	if (subsys->desc->edge) {
+		ret = sysmon_glink_register(desc);
+		if (ret < 0)
+			goto err_sysmon_glink_register;
+	}
+	mutex_lock(&subsys_list_lock);
+	INIT_LIST_HEAD(&subsys->list);
+	list_add_tail(&subsys->list, &subsys_list);
+	mutex_unlock(&subsys_list_lock);
+
+	return subsys;
+err_sysmon_glink_register:
+	sysmon_notifier_unregister(subsys->desc);
+err_sysmon_notifier:
+	if (ofnode)
+		subsys_free_irqs(subsys);
+err_setup_irqs:
+	if (ofnode)
+		subsys_remove_restart_order(ofnode);
+err_register:
+	subsys_debugfs_remove(subsys);
+	device_unregister(&subsys->dev);
+	kfree(subsys);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(subsys_register);
+
+void subsys_unregister(struct subsys_device *subsys)
+{
+	struct subsys_device *subsys_dev, *tmp;
+	struct device_node *device;
+
+	if (IS_ERR_OR_NULL(subsys))
+		return;
+
+	device = subsys->desc->dev->of_node;
+
+	if (get_device(&subsys->dev)) {
+		mutex_lock(&subsys_list_lock);
+		list_for_each_entry_safe(subsys_dev, tmp, &subsys_list, list)
+			if (subsys_dev == subsys)
+				list_del(&subsys->list);
+		mutex_unlock(&subsys_list_lock);
+
+		if (device) {
+			subsys_free_irqs(subsys);
+			subsys_remove_restart_order(device);
+		}
+		mutex_lock(&subsys->track.lock);
+		WARN_ON(subsys->count);
+		device_unregister(&subsys->dev);
+		mutex_unlock(&subsys->track.lock);
+		subsys_debugfs_remove(subsys);
+		subsys_char_device_remove(subsys);
+		sysmon_notifier_unregister(subsys->desc);
+		if (subsys->desc->edge)
+			sysmon_glink_unregister(subsys->desc);
+		put_device(&subsys->dev);
+	}
+}
+EXPORT_SYMBOL(subsys_unregister);
+
+static int subsys_panic(struct device *dev, void *data)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+
+	if (subsys->desc->crash_shutdown)
+		subsys->desc->crash_shutdown(subsys->desc);
+	return 0;
+}
+
+static int ssr_panic_handler(struct notifier_block *this,
+				unsigned long event, void *ptr)
+{
+	bus_for_each_dev(&subsys_bus_type, NULL, NULL, subsys_panic);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block panic_nb = {
+	.notifier_call  = ssr_panic_handler,
+};
+
+static int __init subsys_restart_init(void)
+{
+	int ret;
+
+	ssr_wq = alloc_workqueue("ssr_wq", WQ_CPU_INTENSIVE, 0);
+	BUG_ON(!ssr_wq);
+
+	ret = bus_register(&subsys_bus_type);
+	if (ret)
+		goto err_bus;
+	ret = subsys_debugfs_init();
+	if (ret)
+		goto err_debugfs;
+
+	char_class = class_create(THIS_MODULE, "subsys");
+	if (IS_ERR(char_class)) {
+		ret = PTR_ERR(char_class);
+		pr_err("Failed to create subsys class\n");
+		goto err_class;
+	}
+
+	ret = atomic_notifier_chain_register(&panic_notifier_list,
+			&panic_nb);
+	if (ret)
+		goto err_soc;
+
+	return 0;
+
+err_soc:
+	class_destroy(char_class);
+err_class:
+	subsys_debugfs_exit();
+err_debugfs:
+	bus_unregister(&subsys_bus_type);
+err_bus:
+	destroy_workqueue(ssr_wq);
+	return ret;
+}
+arch_initcall(subsys_restart_init);
+
+MODULE_DESCRIPTION("Subsystem Restart Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/sysmon-glink.c b/drivers/soc/qcom/sysmon-glink.c
new file mode 100644
index 0000000..27d9b7f
--- /dev/null
+++ b/drivers/soc/qcom/sysmon-glink.c
@@ -0,0 +1,481 @@
+/*
+ * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/workqueue.h>
+
+#include <soc/qcom/sysmon.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/glink.h>
+
+#define TX_BUF_SIZE	50
+#define RX_BUF_SIZE	500
+#define TIMEOUT_MS	500
+
+/**
+ * struct sysmon_subsys - Sysmon info structure for a subsystem
+ * @name:	subsys_desc name
+ * @edge:	name of the G-Link edge
+ * @handle:	handle for the "sys_mon" G-Link channel to this subsystem
+ * @link_info:	link information registered for state callbacks
+ * @rx_buf:	buffer used to store the received message
+ * @chan_open:	set when GLINK_CONNECTED, cleared otherwise
+ * @event:	last stored G-Link state event
+ * @glink_handle:	link state notifier handle reference
+ * @intent_count:	number of outstanding rx intents queued
+ * @resp_ready:	completion struct for the event response
+ * @lock:	serializes transactions and state updates
+ * @glink_event_wq:	workqueue handling link state events
+ * @work:	work item that opens/closes the channel
+ * @list:	entry in sysmon_glink_list
+ */
+struct sysmon_subsys {
+	const char		*name;
+	const char		*edge;
+	void			*handle;
+	struct glink_link_info	*link_info;
+	char			rx_buf[RX_BUF_SIZE];
+	bool			chan_open;
+	unsigned int	event;
+	void			*glink_handle;
+	int			intent_count;
+	struct completion	resp_ready;
+	struct mutex		lock;
+	struct workqueue_struct *glink_event_wq;
+	struct work_struct	work;
+	struct list_head	list;
+};
+
+static const char *notif_name[SUBSYS_NOTIF_TYPE_COUNT] = {
+	[SUBSYS_BEFORE_SHUTDOWN] = "before_shutdown",
+	[SUBSYS_AFTER_SHUTDOWN]  = "after_shutdown",
+	[SUBSYS_BEFORE_POWERUP]  = "before_powerup",
+	[SUBSYS_AFTER_POWERUP]   = "after_powerup",
+};
+
+static LIST_HEAD(sysmon_glink_list);
+static DEFINE_MUTEX(sysmon_glink_list_lock);
+
+static struct sysmon_subsys *_find_subsys(struct subsys_desc *desc)
+{
+	struct sysmon_subsys *ss;
+
+	if (desc == NULL)
+		return NULL;
+
+	mutex_lock(&sysmon_glink_list_lock);
+	list_for_each_entry(ss, &sysmon_glink_list, list) {
+		if (!strcmp(ss->name, desc->name)) {
+			mutex_unlock(&sysmon_glink_list_lock);
+			return ss;
+		}
+	}
+	mutex_unlock(&sysmon_glink_list_lock);
+
+	return NULL;
+}
+
+static int sysmon_send_msg(struct sysmon_subsys *ss, const char *tx_buf,
+			   size_t len)
+{
+	int ret;
+	void *handle;
+
+	if (!ss->chan_open)
+		return -ENODEV;
+
+	if (!ss->handle)
+		return -EINVAL;
+
+	init_completion(&ss->resp_ready);
+	handle = ss->handle;
+
+	/* Register an intent to receive data */
+	if (!ss->intent_count) {
+		ret = glink_queue_rx_intent(handle, (void *)ss,
+						sizeof(ss->rx_buf));
+		if (ret) {
+			pr_err("Failed to register receive intent\n");
+			return ret;
+		}
+		ss->intent_count++;
+	}
+
+	pr_debug("Sending sysmon message: %s\n", tx_buf);
+	ret = glink_tx(handle, (void *)ss, (void *)tx_buf, len,
+						GLINK_TX_REQ_INTENT);
+	if (ret) {
+		pr_err("Failed to send sysmon message!\n");
+		return ret;
+	}
+
+	ret = wait_for_completion_timeout(&ss->resp_ready,
+				  msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("Timed out waiting for response\n");
+		return -ETIMEDOUT;
+	}
+	pr_debug("Received response: %s\n", ss->rx_buf);
+	return 0;
+}
+
+/**
+ * sysmon_send_event_no_qmi() - Notify a subsystem of another's state change
+ * @dest_desc:	Subsystem descriptor of the subsystem the notification
+ * should be sent to
+ * @event_desc:	Subsystem descriptor of the subsystem that generated the
+ * notification
+ * @notif:	ID of the notification type (ex. SUBSYS_BEFORE_SHUTDOWN)
+ *
+ * Returns 0 for success, -EINVAL for invalid destination or notification IDs,
+ * -ENODEV if the transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -EPROTO if the destination subsystem
+ * responds, but with something other than an acknowledgment.
+ *
+ * If CONFIG_MSM_SYSMON_GLINK_COMM is not defined, always return success (0).
+ */
+int sysmon_send_event_no_qmi(struct subsys_desc *dest_desc,
+			struct subsys_desc *event_desc,
+			enum subsys_notif_type notif)
+{
+	char tx_buf[TX_BUF_SIZE];
+	int ret;
+	struct sysmon_subsys *ss = NULL;
+
+	ss = _find_subsys(dest_desc);
+	if (ss == NULL)
+		return -EINVAL;
+
+	if (event_desc == NULL || notif < 0 || notif >= SUBSYS_NOTIF_TYPE_COUNT
+			|| notif_name[notif] == NULL)
+		return -EINVAL;
+
+	snprintf(tx_buf, sizeof(tx_buf), "ssr:%s:%s", event_desc->name,
+		 notif_name[notif]);
+
+	mutex_lock(&ss->lock);
+	ret = sysmon_send_msg(ss, tx_buf, strlen(tx_buf));
+	if (ret < 0) {
+		mutex_unlock(&ss->lock);
+		pr_err("Message sending failed %d\n", ret);
+		goto out;
+	}
+
+	if (strcmp(ss->rx_buf, "ssr:ack")) {
+		mutex_unlock(&ss->lock);
+		pr_debug("Unexpected response %s\n", ss->rx_buf);
+		ret = -EPROTO;
+		goto out;
+	}
+	mutex_unlock(&ss->lock);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_send_event_no_qmi);
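+
+/*
+ * Usage sketch (illustrative only; the "modem" and "adsp" descriptors
+ * below are hypothetical). To tell the ADSP that the modem is about to
+ * go down, the SSR core would issue:
+ *
+ *	ret = sysmon_send_event_no_qmi(adsp_desc, modem_desc,
+ *				       SUBSYS_BEFORE_SHUTDOWN);
+ *	if (ret)
+ *		pr_err("adsp not notified of modem shutdown: %d\n", ret);
+ */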
+
+/**
+ * sysmon_send_shutdown_no_qmi() - send shutdown command to a subsystem.
+ * @dest_desc:	Subsystem descriptor of the subsystem to send to
+ *
+ * Returns 0 for success, -EINVAL for an invalid destination, -ENODEV if
+ * the G-Link transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -EPROTO if the destination subsystem
+ * responds with something unexpected.
+ *
+ * If CONFIG_MSM_SYSMON_GLINK_COMM is not defined, always return success (0).
+ */
+int sysmon_send_shutdown_no_qmi(struct subsys_desc *dest_desc)
+{
+	struct sysmon_subsys *ss = NULL;
+	const char tx_buf[] = "system:shutdown";
+	const char expect[] = "system:ack";
+	int ret;
+
+	ss = _find_subsys(dest_desc);
+	if (ss == NULL)
+		return -EINVAL;
+
+	mutex_lock(&ss->lock);
+	ret = sysmon_send_msg(ss, tx_buf, sizeof(tx_buf));
+	if (ret < 0) {
+		mutex_unlock(&ss->lock);
+		pr_err("Message sending failed %d\n", ret);
+		goto out;
+	}
+
+	if (strcmp(ss->rx_buf, expect)) {
+		mutex_unlock(&ss->lock);
+		pr_err("Unexpected response %s\n", ss->rx_buf);
+		ret = -EPROTO;
+		goto out;
+	}
+	mutex_unlock(&ss->lock);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_send_shutdown_no_qmi);
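+
+/*
+ * Usage sketch (the descriptor is hypothetical): a non-zero return means
+ * the peer never acknowledged "system:shutdown", so callers typically
+ * fall back to forcing the subsystem down:
+ *
+ *	if (sysmon_send_shutdown_no_qmi(modem_desc) < 0)
+ *		pr_err("graceful shutdown of modem failed\n");
+ */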
+
+/**
+ * sysmon_get_reason_no_qmi() - Retrieve failure reason from a subsystem.
+ * @dest_desc:	Subsystem descriptor of the subsystem to query
+ * @buf:	Caller-allocated buffer for the returned NUL-terminated reason
+ * @len:	Length of @buf
+ *
+ * Returns 0 for success, -EINVAL for an invalid destination, -ENODEV if
+ * the G-Link transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -EPROTO if the destination subsystem
+ * responds with something unexpected.
+ *
+ * If CONFIG_MSM_SYSMON_GLINK_COMM is not defined, always return success (0).
+ */
+int sysmon_get_reason_no_qmi(struct subsys_desc *dest_desc,
+				char *buf, size_t len)
+{
+	struct sysmon_subsys *ss = NULL;
+	const char tx_buf[] = "ssr:retrieve:sfr";
+	const char expect[] = "ssr:return:";
+	size_t prefix_len = ARRAY_SIZE(expect) - 1;
+	int ret;
+
+	ss = _find_subsys(dest_desc);
+	if (ss == NULL || buf == NULL || len == 0)
+		return -EINVAL;
+
+	mutex_lock(&ss->lock);
+	ret = sysmon_send_msg(ss, tx_buf, sizeof(tx_buf));
+	if (ret < 0) {
+		mutex_unlock(&ss->lock);
+		pr_err("Message sending failed %d\n", ret);
+		goto out;
+	}
+
+	if (strncmp(ss->rx_buf, expect, prefix_len)) {
+		mutex_unlock(&ss->lock);
+		pr_err("Unexpected response %s\n", ss->rx_buf);
+		ret = -EPROTO;
+		goto out;
+	}
+	strlcpy(buf, ss->rx_buf + prefix_len, len);
+	mutex_unlock(&ss->lock);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_get_reason_no_qmi);
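+
+/*
+ * A minimal sketch of retrieving a failure reason (the descriptor is
+ * hypothetical; the buffer size is a caller choice, bounded in practice
+ * by the RX_BUF_SIZE reply limit):
+ *
+ *	char reason[RX_BUF_SIZE];
+ *
+ *	if (!sysmon_get_reason_no_qmi(modem_desc, reason, sizeof(reason)))
+ *		pr_err("modem failure reason: %s\n", reason);
+ */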
+
+static void glink_notify_rx(void *handle, const void *priv,
+		const void *pkt_priv, const void *ptr, size_t size)
+{
+	struct sysmon_subsys *ss = (struct sysmon_subsys *)priv;
+
+	if (!ss) {
+		pr_err("sysmon_subsys mapping failed\n");
+		return;
+	}
+
+	memset(ss->rx_buf, 0, sizeof(ss->rx_buf));
+	ss->intent_count--;
+	if (sizeof(ss->rx_buf) > size)
+		strlcpy(ss->rx_buf, ptr, size);
+	else
+		pr_warn("Invalid recv message size\n");
+	glink_rx_done(ss->handle, ptr, false);
+	complete(&ss->resp_ready);
+}
+
+static void glink_notify_tx_done(void *handle, const void *priv,
+		const void *pkt_priv, const void *ptr)
+{
+	struct sysmon_subsys *cb_data = (struct sysmon_subsys *)priv;
+
+	if (!cb_data)
+		pr_err("sysmon_subsys mapping failed\n");
+	else
+		pr_debug("tx_done notification!\n");
+}
+
+static void glink_notify_state(void *handle, const void *priv,
+		unsigned int event)
+{
+	struct sysmon_subsys *ss = (struct sysmon_subsys *)priv;
+
+	if (!ss) {
+		pr_err("sysmon_subsys mapping failed\n");
+		return;
+	}
+
+	mutex_lock(&ss->lock);
+	ss->event = event;
+	switch (event) {
+	case GLINK_CONNECTED:
+		ss->chan_open = true;
+		break;
+	case GLINK_REMOTE_DISCONNECTED:
+		ss->chan_open = false;
+		break;
+	default:
+		break;
+	}
+	mutex_unlock(&ss->lock);
+}
+
+static void glink_state_up_work_hdlr(struct work_struct *work)
+{
+	struct glink_open_config open_cfg;
+	struct sysmon_subsys *ss = container_of(work, struct sysmon_subsys,
+							work);
+	void *handle = NULL;
+
+	if (!ss) {
+		pr_err("Invalid sysmon_subsys struct parameter\n");
+		return;
+	}
+
+	memset(&open_cfg, 0, sizeof(struct glink_open_config));
+	open_cfg.priv = (void *)ss;
+	open_cfg.notify_rx = glink_notify_rx;
+	open_cfg.notify_tx_done = glink_notify_tx_done;
+	open_cfg.notify_state = glink_notify_state;
+	open_cfg.edge = ss->edge;
+	open_cfg.transport = "smd_trans";
+	open_cfg.name = "sys_mon";
+
+	handle = glink_open(&open_cfg);
+	if (IS_ERR_OR_NULL(handle)) {
+		pr_err("%s: %s: unable to open channel\n",
+					open_cfg.edge, open_cfg.name);
+		return;
+	}
+	ss->handle = handle;
+}
+
+static void glink_state_down_work_hdlr(struct work_struct *work)
+{
+	struct sysmon_subsys *ss = container_of(work, struct sysmon_subsys,
+							work);
+
+	if (ss->handle)
+		glink_close(ss->handle);
+	ss->handle = NULL;
+}
+
+static void sysmon_glink_cb(struct glink_link_state_cb_info *cb_info,
+					void *priv)
+{
+	struct sysmon_subsys *ss = (struct sysmon_subsys *)priv;
+
+	if (!cb_info || !ss) {
+		pr_err("Invalid parameters\n");
+		return;
+	}
+
+	mutex_lock(&ss->lock);
+	switch (cb_info->link_state) {
+	case GLINK_LINK_STATE_UP:
+		pr_debug("LINK UP %s\n", ss->edge);
+		INIT_WORK(&ss->work, glink_state_up_work_hdlr);
+		queue_work(ss->glink_event_wq, &ss->work);
+		break;
+	case GLINK_LINK_STATE_DOWN:
+		pr_debug("LINK DOWN %s\n", ss->edge);
+		INIT_WORK(&ss->work, glink_state_down_work_hdlr);
+		queue_work(ss->glink_event_wq, &ss->work);
+		break;
+	default:
+		pr_warn("Invalid event notification\n");
+		break;
+	}
+	mutex_unlock(&ss->lock);
+}
+
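+/**
+ * sysmon_glink_register() - Allocate and register sysmon G-Link state.
+ * @desc:	Subsystem descriptor of the subsystem
+ *
+ * Registers for link-state callbacks on the subsystem's G-Link edge and
+ * adds the entry to sysmon_glink_list; the "sys_mon" channel itself is
+ * opened later, from the LINK_UP work handler.
+ *
+ * Returns 0 for success, -EINVAL for a NULL descriptor, -ENOMEM on an
+ * allocation failure, or the error returned by
+ * glink_register_link_state_cb().
+ */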
+int sysmon_glink_register(struct subsys_desc *desc)
+{
+	struct sysmon_subsys *ss;
+	struct glink_link_info *link_info;
+	int ret;
+
+	if (!desc)
+		return -EINVAL;
+
+	ss = kzalloc(sizeof(*ss), GFP_KERNEL);
+	if (!ss)
+		return -ENOMEM;
+
+	link_info = kzalloc(sizeof(struct glink_link_info), GFP_KERNEL);
+	if (!link_info) {
+		pr_err("Could not allocate link info structure\n");
+		kfree(ss);
+		return -ENOMEM;
+	}
+
+	ss->glink_event_wq = create_singlethread_workqueue(desc->name);
+	if (ss->glink_event_wq == NULL) {
+		ret = -ENOMEM;
+		goto err_wq;
+	}
+	mutex_init(&ss->lock);
+
+	ss->name = desc->name;
+	ss->handle = NULL;
+	ss->intent_count = 0;
+	ss->link_info = link_info;
+	ss->link_info->edge = ss->edge = desc->edge;
+	ss->link_info->transport = "smd_trans";
+	ss->link_info->glink_link_state_notif_cb = sysmon_glink_cb;
+
+	ss->glink_handle = glink_register_link_state_cb(ss->link_info,
+								(void *)ss);
+	if (IS_ERR_OR_NULL(ss->glink_handle)) {
+		pr_err("Could not register link state cb\n");
+		ret = ss->glink_handle ? PTR_ERR(ss->glink_handle) : -ENOMEM;
+		goto err;
+	}
+
+	mutex_lock(&sysmon_glink_list_lock);
+	INIT_LIST_HEAD(&ss->list);
+	list_add_tail(&ss->list, &sysmon_glink_list);
+	mutex_unlock(&sysmon_glink_list_lock);
+	return 0;
+err:
+	destroy_workqueue(ss->glink_event_wq);
+err_wq:
+	kfree(link_info);
+	kfree(ss);
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_glink_register);
+
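+/**
+ * sysmon_glink_unregister() - Tear down the subsystem's sysmon G-Link state.
+ * @desc:	Subsystem descriptor of the subsystem
+ *
+ * Closes the channel if it is still open, unregisters the link-state
+ * callback and frees the entry allocated by sysmon_glink_register().
+ */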
+void sysmon_glink_unregister(struct subsys_desc *desc)
+{
+	struct sysmon_subsys *ss = NULL;
+
+	if (!desc)
+		return;
+
+	ss = _find_subsys(desc);
+	if (ss == NULL)
+		return;
+
+	list_del(&ss->list);
+	if (ss->handle)
+		glink_close(ss->handle);
+	destroy_workqueue(ss->glink_event_wq);
+	glink_unregister_link_state_cb(ss->glink_handle);
+	kfree(ss->link_info);
+	kfree(ss);
+}
+EXPORT_SYMBOL(sysmon_glink_unregister);
diff --git a/drivers/soc/qcom/sysmon-qmi.c b/drivers/soc/qcom/sysmon-qmi.c
new file mode 100644
index 0000000..a087ad6
--- /dev/null
+++ b/drivers/soc/qcom/sysmon-qmi.c
@@ -0,0 +1,732 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "sysmon-qmi: %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include <soc/qcom/sysmon.h>
+
+#define QMI_RESP_BIT_SHIFT(x)			((x) << 16)
+
+#define QMI_SSCTL_RESTART_REQ_V02		0x0020
+#define QMI_SSCTL_RESTART_RESP_V02		0x0020
+#define QMI_SSCTL_RESTART_READY_IND_V02		0x0020
+#define QMI_SSCTL_SHUTDOWN_REQ_V02		0x0021
+#define QMI_SSCTL_SHUTDOWN_RESP_V02		0x0021
+#define QMI_SSCTL_SHUTDOWN_READY_IND_V02	0x0021
+#define QMI_SSCTL_GET_FAILURE_REASON_REQ_V02	0x0022
+#define QMI_SSCTL_GET_FAILURE_REASON_RESP_V02	0x0022
+#define QMI_SSCTL_SUBSYS_EVENT_REQ_V02		0x0023
+#define QMI_SSCTL_SUBSYS_EVENT_RESP_V02		0x0023
+#define QMI_SSCTL_SUBSYS_EVENT_READY_IND_V02	0x0023
+
+#define QMI_SSCTL_ERROR_MSG_LENGTH		90
+#define QMI_SSCTL_SUBSYS_NAME_LENGTH		15
+#define QMI_SSCTL_SUBSYS_EVENT_REQ_LENGTH	40
+#define QMI_SSCTL_RESP_MSG_LENGTH		7
+#define QMI_SSCTL_EMPTY_MSG_LENGTH		0
+
+#define SSCTL_SERVICE_ID			0x2B
+#define SSCTL_VER_2				2
+#define SERVER_TIMEOUT				500
+#define SHUTDOWN_TIMEOUT			10000
+
+#define QMI_EOTI_DATA_TYPE	\
+{				\
+	.data_type = QMI_EOTI,	\
+	.elem_len  = 0,		\
+	.elem_size = 0,		\
+	.is_array  = NO_ARRAY,	\
+	.tlv_type  = 0x00,	\
+	.offset    = 0,		\
+	.ei_array  = NULL,	\
+},
+
+struct sysmon_qmi_data {
+	const char *name;
+	int instance_id;
+	struct work_struct svc_arrive;
+	struct work_struct svc_exit;
+	struct work_struct svc_rcv_msg;
+	struct qmi_handle *clnt_handle;
+	struct notifier_block notifier;
+	void *notif_handle;
+	bool legacy_version;
+	struct completion server_connect;
+	struct completion ind_recv;
+	struct list_head list;
+};
+
+static struct workqueue_struct *sysmon_wq;
+
+static LIST_HEAD(sysmon_list);
+static DEFINE_MUTEX(sysmon_list_lock);
+static DEFINE_MUTEX(sysmon_lock);
+
+static void sysmon_clnt_recv_msg(struct work_struct *work);
+static void sysmon_clnt_svc_arrive(struct work_struct *work);
+static void sysmon_clnt_svc_exit(struct work_struct *work);
+
+static const int notif_map[SUBSYS_NOTIF_TYPE_COUNT] = {
+	[SUBSYS_BEFORE_POWERUP] = SSCTL_SSR_EVENT_BEFORE_POWERUP,
+	[SUBSYS_AFTER_POWERUP] = SSCTL_SSR_EVENT_AFTER_POWERUP,
+	[SUBSYS_BEFORE_SHUTDOWN] = SSCTL_SSR_EVENT_BEFORE_SHUTDOWN,
+	[SUBSYS_AFTER_SHUTDOWN] = SSCTL_SSR_EVENT_AFTER_SHUTDOWN,
+};
+
+static void sysmon_ind_cb(struct qmi_handle *handle, unsigned int msg_id,
+			void *msg, unsigned int msg_len, void *ind_cb_priv)
+{
+	struct sysmon_qmi_data *data = NULL, *temp;
+
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry(temp, &sysmon_list, list)
+		if (!strcmp(temp->name, (char *)ind_cb_priv))
+			data = temp;
+	mutex_unlock(&sysmon_list_lock);
+
+	if (!data)
+		return;
+
+	pr_debug("%s: Indication received from subsystem\n", data->name);
+	complete(&data->ind_recv);
+}
+
+static int sysmon_svc_event_notify(struct notifier_block *this,
+				      unsigned long code,
+				      void *_cmd)
+{
+	struct sysmon_qmi_data *data = container_of(this,
+					struct sysmon_qmi_data, notifier);
+
+	switch (code) {
+	case QMI_SERVER_ARRIVE:
+		queue_work(sysmon_wq, &data->svc_arrive);
+		break;
+	case QMI_SERVER_EXIT:
+		queue_work(sysmon_wq, &data->svc_exit);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static void sysmon_clnt_notify(struct qmi_handle *handle,
+			     enum qmi_event_type event, void *notify_priv)
+{
+	struct sysmon_qmi_data *data = container_of(notify_priv,
+					struct sysmon_qmi_data, svc_arrive);
+
+	switch (event) {
+	case QMI_RECV_MSG:
+		schedule_work(&data->svc_rcv_msg);
+		break;
+	default:
+		break;
+	}
+}
+
+static void sysmon_clnt_svc_arrive(struct work_struct *work)
+{
+	int rc;
+	struct sysmon_qmi_data *data = container_of(work,
+					struct sysmon_qmi_data, svc_arrive);
+
+	/* Create a Local client port for QMI communication */
+	data->clnt_handle = qmi_handle_create(sysmon_clnt_notify, work);
+	if (!data->clnt_handle) {
+		pr_err("QMI client handle alloc failed for %s\n", data->name);
+		return;
+	}
+
+	rc = qmi_connect_to_service(data->clnt_handle, SSCTL_SERVICE_ID,
+					SSCTL_VER_2, data->instance_id);
+	if (rc < 0) {
+		pr_err("%s: Could not connect handle to service\n",
+								data->name);
+		qmi_handle_destroy(data->clnt_handle);
+		data->clnt_handle = NULL;
+		return;
+	}
+	pr_info("Connection established between QMI handle and %s's SSCTL service\n"
+								, data->name);
+
+	rc = qmi_register_ind_cb(data->clnt_handle, sysmon_ind_cb,
+							(void *)data->name);
+	if (rc < 0)
+		pr_warn("%s: Could not register the indication callback\n",
+								data->name);
+}
+
+static void sysmon_clnt_svc_exit(struct work_struct *work)
+{
+	struct sysmon_qmi_data *data = container_of(work,
+					struct sysmon_qmi_data, svc_exit);
+
+	qmi_handle_destroy(data->clnt_handle);
+	data->clnt_handle = NULL;
+}
+
+static void sysmon_clnt_recv_msg(struct work_struct *work)
+{
+	int ret;
+	struct sysmon_qmi_data *data = container_of(work,
+					struct sysmon_qmi_data, svc_rcv_msg);
+
+	do {
+		pr_debug("%s: Notified about a Receive event\n", data->name);
+	} while ((ret = qmi_recv_msg(data->clnt_handle)) == 0);
+
+	if (ret != -ENOMSG)
+		pr_err("%s: Error receiving message\n", data->name);
+}
+
+struct qmi_ssctl_subsys_event_req_msg {
+	uint8_t subsys_name_len;
+	char subsys_name[QMI_SSCTL_SUBSYS_NAME_LENGTH];
+	enum ssctl_ssr_event_enum_type event;
+	uint8_t evt_driven_valid;
+	enum ssctl_ssr_event_driven_enum_type evt_driven;
+};
+
+struct qmi_ssctl_subsys_event_resp_msg {
+	struct qmi_response_type_v01 resp;
+};
+
+static struct elem_info qmi_ssctl_subsys_event_req_msg_ei[] = {
+	{
+		.data_type = QMI_DATA_LEN,
+		.elem_len  = 1,
+		.elem_size = sizeof(uint8_t),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x01,
+		.offset    = offsetof(struct qmi_ssctl_subsys_event_req_msg,
+				      subsys_name_len),
+		.ei_array  = NULL,
+	},
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len  = QMI_SSCTL_SUBSYS_NAME_LENGTH,
+		.elem_size = sizeof(char),
+		.is_array  = VAR_LEN_ARRAY,
+		.tlv_type  = 0x01,
+		.offset    = offsetof(struct qmi_ssctl_subsys_event_req_msg,
+				      subsys_name),
+		.ei_array  = NULL,
+	},
+	{
+		.data_type = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len  = 1,
+		.elem_size = sizeof(uint32_t),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x02,
+		.offset    = offsetof(struct qmi_ssctl_subsys_event_req_msg,
+				      event),
+		.ei_array  = NULL,
+	},
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len  = 1,
+		.elem_size = sizeof(uint8_t),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x10,
+		.offset    = offsetof(struct qmi_ssctl_subsys_event_req_msg,
+				      evt_driven_valid),
+		.ei_array  = NULL,
+	},
+	{
+		.data_type = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len  = 1,
+		.elem_size = sizeof(uint32_t),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x10,
+		.offset    = offsetof(struct qmi_ssctl_subsys_event_req_msg,
+				      evt_driven),
+		.ei_array  = NULL,
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+static struct elem_info qmi_ssctl_subsys_event_resp_msg_ei[] = {
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len  = 1,
+		.elem_size = sizeof(struct qmi_response_type_v01),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x02,
+		.offset    = offsetof(struct qmi_ssctl_subsys_event_resp_msg,
+				      resp),
+		.ei_array  = get_qmi_response_type_v01_ei(),
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+/**
+ * sysmon_send_event() - Notify a subsystem of another's state change
+ * @dest_desc:	Subsystem descriptor of the subsystem the notification
+ * should be sent to
+ * @event_desc:	Subsystem descriptor of the subsystem that generated the
+ * notification
+ * @notif:	ID of the notification type (ex. SUBSYS_BEFORE_SHUTDOWN)
+ *
+ * Reverts to using legacy sysmon API (sysmon_send_event_no_qmi()) if
+ * client handle is not set.
+ *
+ * Returns 0 for success, -EINVAL for invalid destination or notification IDs,
+ * -ENODEV if the transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -EPROTO if the destination subsystem
+ * responds, but with something other than an acknowledgment.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_send_event(struct subsys_desc *dest_desc,
+			struct subsys_desc *event_desc,
+			enum subsys_notif_type notif)
+{
+	struct qmi_ssctl_subsys_event_req_msg req;
+	struct msg_desc req_desc, resp_desc;
+	struct qmi_ssctl_subsys_event_resp_msg resp = { { 0, 0 } };
+	struct sysmon_qmi_data *data = NULL, *temp;
+	const char *event_ss = event_desc->name;
+	const char *dest_ss = dest_desc->name;
+	int ret;
+
+	if (notif < 0 || notif >= SUBSYS_NOTIF_TYPE_COUNT || event_ss == NULL
+		|| dest_ss == NULL)
+		return -EINVAL;
+
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry(temp, &sysmon_list, list)
+		if (!strcmp(temp->name, dest_desc->name))
+			data = temp;
+	mutex_unlock(&sysmon_list_lock);
+
+	if (!data)
+		return -EINVAL;
+
+	if (!data->clnt_handle) {
+		pr_debug("No SSCTL_V2 support for %s. Revert to SSCTL_V0\n",
+								dest_ss);
+		ret = sysmon_send_event_no_qmi(dest_desc, event_desc, notif);
+		if (ret)
+			pr_debug("SSCTL_V0 implementation failed - %d\n", ret);
+
+		return ret;
+	}
+
+	snprintf(req.subsys_name, ARRAY_SIZE(req.subsys_name), "%s", event_ss);
+	req.subsys_name_len = strlen(req.subsys_name);
+	req.event = notif_map[notif];
+	req.evt_driven_valid = 1;
+	req.evt_driven = SSCTL_SSR_EVENT_FORCED;
+
+	req_desc.msg_id = QMI_SSCTL_SUBSYS_EVENT_REQ_V02;
+	req_desc.max_msg_len = QMI_SSCTL_SUBSYS_EVENT_REQ_LENGTH;
+	req_desc.ei_array = qmi_ssctl_subsys_event_req_msg_ei;
+
+	resp_desc.msg_id = QMI_SSCTL_SUBSYS_EVENT_RESP_V02;
+	resp_desc.max_msg_len = QMI_SSCTL_RESP_MSG_LENGTH;
+	resp_desc.ei_array = qmi_ssctl_subsys_event_resp_msg_ei;
+
+	mutex_lock(&sysmon_lock);
+	ret = qmi_send_req_wait(data->clnt_handle, &req_desc, &req,
+		sizeof(req), &resp_desc, &resp, sizeof(resp), SERVER_TIMEOUT);
+	if (ret < 0) {
+		pr_err("QMI send req to %s failed, ret - %d\n", dest_ss, ret);
+		goto out;
+	}
+
+	/* Check the response */
+	if (QMI_RESP_BIT_SHIFT(resp.resp.result) != QMI_RESULT_SUCCESS_V01) {
+		pr_debug("QMI request failed 0x%x\n",
+					QMI_RESP_BIT_SHIFT(resp.resp.error));
+		ret = -EREMOTEIO;
+	}
+out:
+	mutex_unlock(&sysmon_lock);
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_send_event);
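+
+/*
+ * Illustrative only (descriptors are hypothetical): during a modem
+ * restart the SSR core fans each state change out to the other
+ * registered subsystems, e.g. once the modem is back up:
+ *
+ *	sysmon_send_event(adsp_desc, modem_desc, SUBSYS_AFTER_POWERUP);
+ *
+ * The call transparently drops back to the legacy SSCTL_V0 path when the
+ * destination has no QMI client handle.
+ */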
+
+struct qmi_ssctl_shutdown_req_msg {
+};
+
+struct qmi_ssctl_shutdown_resp_msg {
+	struct qmi_response_type_v01 resp;
+};
+
+static struct elem_info qmi_ssctl_shutdown_req_msg_ei[] = {
+	QMI_EOTI_DATA_TYPE
+};
+
+static struct elem_info qmi_ssctl_shutdown_resp_msg_ei[] = {
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len  = 1,
+		.elem_size = sizeof(struct qmi_response_type_v01),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x02,
+		.offset    = offsetof(struct qmi_ssctl_shutdown_resp_msg,
+				      resp),
+		.ei_array  = get_qmi_response_type_v01_ei(),
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+/**
+ * sysmon_send_shutdown() - send shutdown command to a
+ * subsystem.
+ * @dest_desc:	Subsystem descriptor of the subsystem to send to
+ *
+ * Reverts to using legacy sysmon API (sysmon_send_shutdown_no_qmi()) if
+ * client handle is not set.
+ *
+ * Returns 0 for success, -EINVAL for an invalid destination, -ENODEV if
+ * the SMD transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -EPROTO if the destination subsystem
+ * responds with something unexpected.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_send_shutdown(struct subsys_desc *dest_desc)
+{
+	struct msg_desc req_desc, resp_desc;
+	struct qmi_ssctl_shutdown_resp_msg resp = { { 0, 0 } };
+	struct sysmon_qmi_data *data = NULL, *temp;
+	const char *dest_ss = dest_desc->name;
+	char req = 0;
+	int ret, shutdown_ack_ret;
+
+	if (dest_ss == NULL)
+		return -EINVAL;
+
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry(temp, &sysmon_list, list)
+		if (!strcmp(temp->name, dest_desc->name))
+			data = temp;
+	mutex_unlock(&sysmon_list_lock);
+
+	if (!data)
+		return -EINVAL;
+
+	if (!data->clnt_handle) {
+		pr_debug("No SSCTL_V2 support for %s. Revert to SSCTL_V0\n",
+								dest_ss);
+		ret = sysmon_send_shutdown_no_qmi(dest_desc);
+		if (ret)
+			pr_debug("SSCTL_V0 implementation failed - %d\n", ret);
+
+		return ret;
+	}
+
+	req_desc.msg_id = QMI_SSCTL_SHUTDOWN_REQ_V02;
+	req_desc.max_msg_len = QMI_SSCTL_EMPTY_MSG_LENGTH;
+	req_desc.ei_array = qmi_ssctl_shutdown_req_msg_ei;
+
+	resp_desc.msg_id = QMI_SSCTL_SHUTDOWN_RESP_V02;
+	resp_desc.max_msg_len = QMI_SSCTL_RESP_MSG_LENGTH;
+	resp_desc.ei_array = qmi_ssctl_shutdown_resp_msg_ei;
+
+	reinit_completion(&data->ind_recv);
+	mutex_lock(&sysmon_lock);
+	ret = qmi_send_req_wait(data->clnt_handle, &req_desc, &req,
+		sizeof(req), &resp_desc, &resp, sizeof(resp), SERVER_TIMEOUT);
+	if (ret < 0) {
+		pr_err("QMI send req to %s failed, ret - %d\n", dest_ss, ret);
+		goto out;
+	}
+
+	/* Check the response */
+	if (QMI_RESP_BIT_SHIFT(resp.resp.result) != QMI_RESULT_SUCCESS_V01) {
+		pr_err("QMI request failed 0x%x\n",
+					QMI_RESP_BIT_SHIFT(resp.resp.error));
+		ret = -EREMOTEIO;
+		goto out;
+	}
+
+	shutdown_ack_ret = wait_for_shutdown_ack(dest_desc);
+	if (shutdown_ack_ret < 0) {
+		pr_err("shutdown_ack SMP2P bit for %s not set\n", data->name);
+		if (!&data->ind_recv.done) {
+			pr_err("QMI shutdown indication not received\n");
+			ret = shutdown_ack_ret;
+		}
+		goto out;
+	} else if (shutdown_ack_ret > 0)
+		goto out;
+
+	if (!wait_for_completion_timeout(&data->ind_recv,
+					msecs_to_jiffies(SHUTDOWN_TIMEOUT))) {
+		pr_err("Timed out waiting for shutdown indication from %s\n",
+							data->name);
+		ret = -ETIMEDOUT;
+	}
+out:
+	mutex_unlock(&sysmon_lock);
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_send_shutdown);
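+
+/*
+ * Usage sketch (descriptor is hypothetical): a successful request alone
+ * is not enough here; the subsystem is only considered down once the
+ * SMP2P shutdown ack or the QMI shutdown indication arrives within
+ * SHUTDOWN_TIMEOUT:
+ *
+ *	if (sysmon_send_shutdown(modem_desc) == -ETIMEDOUT)
+ *		pr_err("modem did not confirm shutdown in time\n");
+ */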
+
+struct qmi_ssctl_get_failure_reason_req_msg {
+};
+
+struct qmi_ssctl_get_failure_reason_resp_msg {
+	struct qmi_response_type_v01 resp;
+	uint8_t error_message_valid;
+	uint32_t error_message_len;
+	char error_message[QMI_SSCTL_ERROR_MSG_LENGTH];
+};
+
+static struct elem_info qmi_ssctl_get_failure_reason_req_msg_ei[] = {
+	QMI_EOTI_DATA_TYPE
+};
+
+static struct elem_info qmi_ssctl_get_failure_reason_resp_msg_ei[] = {
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len  = 1,
+		.elem_size = sizeof(struct qmi_response_type_v01),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x02,
+		.offset    = offsetof(
+			struct qmi_ssctl_get_failure_reason_resp_msg,
+							resp),
+		.ei_array  = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len  = 1,
+		.elem_size = sizeof(uint8_t),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x10,
+		.offset    = offsetof(
+			struct qmi_ssctl_get_failure_reason_resp_msg,
+						error_message_valid),
+		.ei_array  = NULL,
+	},
+	{
+		.data_type = QMI_DATA_LEN,
+		.elem_len  = 1,
+		.elem_size = sizeof(uint8_t),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x10,
+		.offset    = offsetof(
+			struct qmi_ssctl_get_failure_reason_resp_msg,
+						error_message_len),
+		.ei_array  = NULL,
+	},
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len  = QMI_SSCTL_ERROR_MSG_LENGTH,
+		.elem_size = sizeof(char),
+		.is_array  = VAR_LEN_ARRAY,
+		.tlv_type  = 0x10,
+		.offset    = offsetof(
+			struct qmi_ssctl_get_failure_reason_resp_msg,
+						error_message),
+		.ei_array  = NULL,
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+/**
+ * sysmon_get_reason() - Retrieve failure reason from a subsystem.
+ * @dest_desc:	Subsystem descriptor of the subsystem to query
+ * @buf:	Caller-allocated buffer for the returned NUL-terminated reason
+ * @len:	Length of @buf
+ *
+ * Reverts to using legacy sysmon API (sysmon_get_reason_no_qmi()) if client
+ * handle is not set.
+ *
+ * Returns 0 for success, -EINVAL for an invalid destination, -ENODEV if
+ * the SMD transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -EPROTO if the destination subsystem
+ * responds with something unexpected.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_get_reason(struct subsys_desc *dest_desc, char *buf, size_t len)
+{
+	struct msg_desc req_desc, resp_desc;
+	struct qmi_ssctl_get_failure_reason_resp_msg resp = { { 0, 0 } };
+	struct sysmon_qmi_data *data = NULL, *temp;
+	const char *dest_ss = dest_desc->name;
+	const char expect[] = "ssr:return:";
+	char req = 0;
+	int ret;
+
+	if (dest_ss == NULL || buf == NULL || len == 0)
+		return -EINVAL;
+
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry(temp, &sysmon_list, list)
+		if (!strcmp(temp->name, dest_desc->name))
+			data = temp;
+	mutex_unlock(&sysmon_list_lock);
+
+	if (!data)
+		return -EINVAL;
+
+	if (!data->clnt_handle) {
+		pr_debug("No SSCTL_V2 support for %s. Revert to SSCTL_V0\n",
+								dest_ss);
+		ret = sysmon_get_reason_no_qmi(dest_desc, buf, len);
+		if (ret)
+			pr_debug("SSCTL_V0 implementation failed - %d\n", ret);
+
+		return ret;
+	}
+
+	req_desc.msg_id = QMI_SSCTL_GET_FAILURE_REASON_REQ_V02;
+	req_desc.max_msg_len = QMI_SSCTL_EMPTY_MSG_LENGTH;
+	req_desc.ei_array = qmi_ssctl_get_failure_reason_req_msg_ei;
+
+	resp_desc.msg_id = QMI_SSCTL_GET_FAILURE_REASON_RESP_V02;
+	resp_desc.max_msg_len = QMI_SSCTL_ERROR_MSG_LENGTH;
+	resp_desc.ei_array = qmi_ssctl_get_failure_reason_resp_msg_ei;
+
+	mutex_lock(&sysmon_lock);
+	ret = qmi_send_req_wait(data->clnt_handle, &req_desc, &req,
+		sizeof(req), &resp_desc, &resp, sizeof(resp), SERVER_TIMEOUT);
+	if (ret < 0) {
+		pr_err("QMI send req to %s failed, ret - %d\n", dest_ss, ret);
+		goto out;
+	}
+
+	/* Check the response */
+	if (QMI_RESP_BIT_SHIFT(resp.resp.result) != QMI_RESULT_SUCCESS_V01) {
+		pr_err("QMI request failed 0x%x\n",
+					QMI_RESP_BIT_SHIFT(resp.resp.error));
+		ret = -EREMOTEIO;
+		goto out;
+	}
+
+	if (!strcmp(resp.error_message, expect)) {
+		pr_err("Unexpected response %s\n", resp.error_message);
+		ret = -EPROTO;
+		goto out;
+	}
+	strlcpy(buf, resp.error_message,
+		min_t(size_t, len, resp.error_message_len + 1));
+out:
+	mutex_unlock(&sysmon_lock);
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_get_reason);
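+
+/*
+ * Usage mirrors sysmon_get_reason_no_qmi(); the QMI path differs only in
+ * that the reason arrives in the response TLV rather than as an
+ * "ssr:return:"-prefixed string. A sketch (descriptor is hypothetical):
+ *
+ *	char reason[QMI_SSCTL_ERROR_MSG_LENGTH];
+ *
+ *	if (!sysmon_get_reason(modem_desc, reason, sizeof(reason)))
+ *		pr_err("modem failure reason: %s\n", reason);
+ */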
+
+/**
+ * sysmon_notifier_register() - Initialize sysmon data for a subsystem.
+ * @desc:	Subsystem descriptor of the subsystem
+ *
+ * Returns 0 for success. If the subsystem does not support SSCTL v2, its
+ * entry is simply added to the sysmon_list and 0 is returned. If SSCTL v2
+ * is supported, a notifier block is also registered to receive events
+ * from the SSCTL service on the subsystem.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_notifier_register(struct subsys_desc *desc)
+{
+	struct sysmon_qmi_data *data;
+	int rc = 0;
+
+	data = kmalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->name = desc->name;
+	data->instance_id = desc->ssctl_instance_id;
+	data->clnt_handle = NULL;
+	data->legacy_version = false;
+
+	mutex_lock(&sysmon_list_lock);
+	if (data->instance_id <= 0) {
+		pr_debug("SSCTL instance id not defined\n");
+		goto add_list;
+	}
+
+	if (sysmon_wq)
+		goto notif_register;
+
+	sysmon_wq = create_singlethread_workqueue("sysmon_wq");
+	if (!sysmon_wq) {
+		mutex_unlock(&sysmon_list_lock);
+		pr_err("Could not create workqueue\n");
+		kfree(data);
+		return -ENOMEM;
+	}
+
+notif_register:
+	data->notifier.notifier_call = sysmon_svc_event_notify;
+	init_completion(&data->ind_recv);
+
+	INIT_WORK(&data->svc_arrive, sysmon_clnt_svc_arrive);
+	INIT_WORK(&data->svc_exit, sysmon_clnt_svc_exit);
+	INIT_WORK(&data->svc_rcv_msg, sysmon_clnt_recv_msg);
+
+	rc = qmi_svc_event_notifier_register(SSCTL_SERVICE_ID, SSCTL_VER_2,
+					data->instance_id, &data->notifier);
+	if (rc < 0)
+		pr_err("Notifier register failed for %s\n", data->name);
+add_list:
+	INIT_LIST_HEAD(&data->list);
+	list_add_tail(&data->list, &sysmon_list);
+	mutex_unlock(&sysmon_list_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(sysmon_notifier_register);
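+
+/*
+ * Pairing sketch (illustrative only): subsys_register() calls this once
+ * per subsystem. Entries without a positive SSCTL instance id are still
+ * added to sysmon_list so later lookups succeed; they simply never get a
+ * QMI client handle and therefore always take the legacy path:
+ *
+ *	ret = sysmon_notifier_register(desc);
+ *	if (ret < 0)
+ *		return ret;
+ *	...
+ *	sysmon_notifier_unregister(desc);
+ */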
+
+/**
+ * sysmon_notifier_unregister() - Cleanup the subsystem's sysmon data.
+ * @desc:	Subsystem descriptor of the subsystem
+ *
+ * If the subsystem does not support SSCTL v2, its entry is simply removed
+ * from the sysmon_list. If SSCTL v2 is supported, the notifier block that
+ * receives events from the SSCTL service is also unregistered.
+ */
+void sysmon_notifier_unregister(struct subsys_desc *desc)
+{
+	struct sysmon_qmi_data *data = NULL, *sysmon_data, *tmp;
+
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry_safe(sysmon_data, tmp, &sysmon_list, list)
+		if (!strcmp(sysmon_data->name, desc->name)) {
+			data = sysmon_data;
+			list_del(&data->list);
+		}
+
+	if (data == NULL)
+		goto exit;
+
+	if (data->instance_id > 0)
+		qmi_svc_event_notifier_unregister(SSCTL_SERVICE_ID,
+			SSCTL_VER_2, data->instance_id, &data->notifier);
+
+	if (sysmon_wq && list_empty(&sysmon_list))
+		destroy_workqueue(sysmon_wq);
+exit:
+	mutex_unlock(&sysmon_list_lock);
+	kfree(data);
+}
+EXPORT_SYMBOL(sysmon_notifier_unregister);
diff --git a/drivers/soc/qcom/sysmon.c b/drivers/soc/qcom/sysmon.c
new file mode 100644
index 0000000..9810c3f
--- /dev/null
+++ b/drivers/soc/qcom/sysmon.c
@@ -0,0 +1,395 @@
+/*
+ * Copyright (c) 2011-2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <soc/qcom/hsic_sysmon.h>
+#include <soc/qcom/sysmon.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/smd.h>
+
+#define TX_BUF_SIZE	50
+#define RX_BUF_SIZE	500
+#define TIMEOUT_MS	500
+
+enum transports {
+	TRANSPORT_SMD,
+	TRANSPORT_HSIC,
+};
+
+struct sysmon_subsys {
+	struct mutex		lock;
+	struct smd_channel	*chan;
+	bool			chan_open;
+	struct completion	resp_ready;
+	char			rx_buf[RX_BUF_SIZE];
+	enum transports		transport;
+	struct device		*dev;
+	u32			pid;
+	struct list_head	list;
+};
+
+static const char *notif_name[SUBSYS_NOTIF_TYPE_COUNT] = {
+	[SUBSYS_BEFORE_SHUTDOWN] = "before_shutdown",
+	[SUBSYS_AFTER_SHUTDOWN]  = "after_shutdown",
+	[SUBSYS_BEFORE_POWERUP]  = "before_powerup",
+	[SUBSYS_AFTER_POWERUP]   = "after_powerup",
+};
+
+static LIST_HEAD(sysmon_list);
+static DEFINE_MUTEX(sysmon_list_lock);
+
+static int sysmon_send_smd(struct sysmon_subsys *ss, const char *tx_buf,
+			   size_t len)
+{
+	int ret;
+
+	if (!ss->chan_open)
+		return -ENODEV;
+
+	init_completion(&ss->resp_ready);
+	pr_debug("Sending SMD message: %s\n", tx_buf);
+	smd_write(ss->chan, tx_buf, len);
+	ret = wait_for_completion_timeout(&ss->resp_ready,
+				  msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int sysmon_send_hsic(struct sysmon_subsys *ss, const char *tx_buf,
+			    size_t len)
+{
+	int ret;
+	size_t actual_len;
+
+	pr_debug("Sending HSIC message: %s\n", tx_buf);
+	ret = hsic_sysmon_write(HSIC_SYSMON_DEV_EXT_MODEM,
+				tx_buf, len, TIMEOUT_MS);
+	if (ret)
+		return ret;
+	ret = hsic_sysmon_read(HSIC_SYSMON_DEV_EXT_MODEM, ss->rx_buf,
+			       ARRAY_SIZE(ss->rx_buf), &actual_len, TIMEOUT_MS);
+	return ret;
+}
+
+static int sysmon_send_msg(struct sysmon_subsys *ss, const char *tx_buf,
+			   size_t len)
+{
+	int ret;
+
+	switch (ss->transport) {
+	case TRANSPORT_SMD:
+		ret = sysmon_send_smd(ss, tx_buf, len);
+		break;
+	case TRANSPORT_HSIC:
+		ret = sysmon_send_hsic(ss, tx_buf, len);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	if (!ret)
+		pr_debug("Received response: %s\n", ss->rx_buf);
+
+	return ret;
+}
+
+/**
+ * sysmon_send_event_no_qmi() - Notify a subsystem of another's state change
+ * @dest_desc:	Subsystem descriptor of the subsystem the notification
+ * should be sent to
+ * @event_desc:	Subsystem descriptor of the subsystem that generated the
+ * notification
+ * @notif:	ID of the notification type (ex. SUBSYS_BEFORE_SHUTDOWN)
+ *
+ * Returns 0 for success, -EINVAL for invalid destination or notification IDs,
+ * -ENODEV if the transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -EPROTO if the destination subsystem
+ * responds, but with something other than an acknowledgment.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_send_event_no_qmi(struct subsys_desc *dest_desc,
+			struct subsys_desc *event_desc,
+			enum subsys_notif_type notif)
+{
+	char tx_buf[TX_BUF_SIZE];
+	int ret;
+	struct sysmon_subsys *tmp, *ss = NULL;
+	const char *event_ss = event_desc->name;
+
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry(tmp, &sysmon_list, list)
+		if (tmp->pid == dest_desc->sysmon_pid)
+			ss = tmp;
+	mutex_unlock(&sysmon_list_lock);
+
+	if (ss == NULL)
+		return -EINVAL;
+
+	if (ss->dev == NULL)
+		return -ENODEV;
+
+	if (notif < 0 || notif >= SUBSYS_NOTIF_TYPE_COUNT || event_ss == NULL ||
+						notif_name[notif] == NULL)
+		return -EINVAL;
+
+	snprintf(tx_buf, ARRAY_SIZE(tx_buf), "ssr:%s:%s", event_ss,
+		 notif_name[notif]);
+
+	mutex_lock(&ss->lock);
+	ret = sysmon_send_msg(ss, tx_buf, strlen(tx_buf));
+	if (ret) {
+		pr_err("Message sending failed %d\n", ret);
+		goto out;
+	}
+
+	if (strcmp(ss->rx_buf, "ssr:ack")) {
+		pr_debug("Unexpected response %s\n", ss->rx_buf);
+		ret = -EPROTO;
+	}
+out:
+	mutex_unlock(&ss->lock);
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_send_event_no_qmi);
+
+/**
+ * sysmon_send_shutdown_no_qmi() - send shutdown command to a subsystem.
+ * @dest_desc:	Subsystem descriptor of the subsystem to send to
+ *
+ * Returns 0 for success, -EINVAL for an invalid destination, -ENODEV if
+ * the SMD transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -EPROTO if the destination subsystem
+ * responds with something unexpected.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_send_shutdown_no_qmi(struct subsys_desc *dest_desc)
+{
+	struct sysmon_subsys *tmp, *ss = NULL;
+	const char tx_buf[] = "system:shutdown";
+	const char expect[] = "system:ack";
+	int ret;
+
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry(tmp, &sysmon_list, list)
+		if (tmp->pid == dest_desc->sysmon_pid)
+			ss = tmp;
+	mutex_unlock(&sysmon_list_lock);
+
+	if (ss == NULL)
+		return -EINVAL;
+
+	if (ss->dev == NULL)
+		return -ENODEV;
+
+	mutex_lock(&ss->lock);
+	ret = sysmon_send_msg(ss, tx_buf, ARRAY_SIZE(tx_buf));
+	if (ret) {
+		pr_err("Message sending failed %d\n", ret);
+		goto out;
+	}
+
+	if (strcmp(ss->rx_buf, expect)) {
+		pr_err("Unexpected response %s\n", ss->rx_buf);
+		ret = -EPROTO;
+	}
+out:
+	mutex_unlock(&ss->lock);
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_send_shutdown_no_qmi);
+
+/**
+ * sysmon_get_reason_no_qmi() - Retrieve failure reason from a subsystem.
+ * @dest_desc:	Subsystem descriptor of the subsystem to query
+ * @buf:	Caller-allocated buffer for the returned NUL-terminated reason
+ * @len:	Length of @buf
+ *
+ * Returns 0 for success, -EINVAL for an invalid destination, -ENODEV if
+ * the SMD transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -EPROTO if the destination subsystem
+ * responds with something unexpected.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_get_reason_no_qmi(struct subsys_desc *dest_desc,
+				char *buf, size_t len)
+{
+	struct sysmon_subsys *tmp, *ss = NULL;
+	const char tx_buf[] = "ssr:retrieve:sfr";
+	const char expect[] = "ssr:return:";
+	size_t prefix_len = ARRAY_SIZE(expect) - 1;
+	int ret;
+
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry(tmp, &sysmon_list, list)
+		if (tmp->pid == dest_desc->sysmon_pid)
+			ss = tmp;
+	mutex_unlock(&sysmon_list_lock);
+
+	if (ss == NULL || buf == NULL || len == 0)
+		return -EINVAL;
+
+	if (ss->dev == NULL)
+		return -ENODEV;
+
+	mutex_lock(&ss->lock);
+	ret = sysmon_send_msg(ss, tx_buf, ARRAY_SIZE(tx_buf));
+	if (ret) {
+		pr_err("Message sending failed %d\n", ret);
+		goto out;
+	}
+
+	if (strncmp(ss->rx_buf, expect, prefix_len)) {
+		pr_err("Unexpected response %s\n", ss->rx_buf);
+		ret = -EPROTO;
+		goto out;
+	}
+	strlcpy(buf, ss->rx_buf + prefix_len, len);
+out:
+	mutex_unlock(&ss->lock);
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_get_reason_no_qmi);
+
+static void sysmon_smd_notify(void *priv, unsigned int smd_event)
+{
+	struct sysmon_subsys *ss = priv;
+
+	switch (smd_event) {
+	case SMD_EVENT_DATA: {
+		if (smd_read_avail(ss->chan) > 0) {
+			smd_read_from_cb(ss->chan, ss->rx_buf,
+					 ARRAY_SIZE(ss->rx_buf));
+			complete(&ss->resp_ready);
+		}
+		break;
+	}
+	case SMD_EVENT_OPEN:
+		ss->chan_open = true;
+		break;
+	case SMD_EVENT_CLOSE:
+		ss->chan_open = false;
+		break;
+	}
+}
+
+static int sysmon_probe(struct platform_device *pdev)
+{
+	struct sysmon_subsys *ss;
+	int ret;
+
+	if (pdev->id < 0 || pdev->id >= SYSMON_NUM_SS)
+		return -ENODEV;
+
+	ss = devm_kzalloc(&pdev->dev, sizeof(*ss), GFP_KERNEL);
+	if (!ss)
+		return -ENOMEM;
+
+	mutex_init(&ss->lock);
+	if (pdev->id == SYSMON_SS_EXT_MODEM) {
+		ss->transport = TRANSPORT_HSIC;
+		ret = hsic_sysmon_open(HSIC_SYSMON_DEV_EXT_MODEM);
+		if (ret) {
+			pr_err("HSIC open failed\n");
+			return ret;
+		}
+	} else if (pdev->id < SMD_NUM_TYPE) {
+		ss->transport = TRANSPORT_SMD;
+		ret = smd_named_open_on_edge("sys_mon", pdev->id, &ss->chan,
+						ss, sysmon_smd_notify);
+		if (ret) {
+			pr_err("SMD open failed\n");
+			return ret;
+		}
+		smd_disable_read_intr(ss->chan);
+	} else {
+		return -EINVAL;
+	}
+
+	ss->dev = &pdev->dev;
+	ss->pid = pdev->id;
+
+	mutex_lock(&sysmon_list_lock);
+	INIT_LIST_HEAD(&ss->list);
+	list_add_tail(&ss->list, &sysmon_list);
+	mutex_unlock(&sysmon_list_lock);
+	return 0;
+}
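+
+/*
+ * Note on ids (a reading of the checks above, not an exhaustive map):
+ * SYSMON_SS_EXT_MODEM selects the HSIC transport used for external
+ * modems, while any other id below SMD_NUM_TYPE is taken as the SMD edge
+ * on which to open the "sys_mon" channel.
+ */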
+
+static int sysmon_remove(struct platform_device *pdev)
+{
+	struct sysmon_subsys *sysmon, *tmp, *ss = NULL;
+
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry_safe(sysmon, tmp, &sysmon_list, list) {
+		if (sysmon->pid == pdev->id) {
+			ss = sysmon;
+			list_del(&ss->list);
+		}
+	}
+	mutex_unlock(&sysmon_list_lock);
+
+	if (ss == NULL)
+		return -EINVAL;
+
+	mutex_lock(&ss->lock);
+	switch (ss->transport) {
+	case TRANSPORT_SMD:
+		smd_close(ss->chan);
+		break;
+	case TRANSPORT_HSIC:
+		hsic_sysmon_close(HSIC_SYSMON_DEV_EXT_MODEM);
+		break;
+	}
+	mutex_unlock(&ss->lock);
+
+	return 0;
+}
+
+static struct platform_driver sysmon_driver = {
+	.probe		= sysmon_probe,
+	.remove		= sysmon_remove,
+	.driver		= {
+		.name		= "sys_mon",
+		.owner		= THIS_MODULE,
+	},
+};
+
+static int __init sysmon_init(void)
+{
+	return platform_driver_register(&sysmon_driver);
+}
+subsys_initcall(sysmon_init);
+
+static void __exit sysmon_exit(void)
+{
+	platform_driver_unregister(&sysmon_driver);
+}
+module_exit(sysmon_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("system monitor communication library");
+MODULE_ALIAS("platform:sys_mon");
diff --git a/drivers/video/fbdev/amba-clcd-versatile.c b/drivers/video/fbdev/amba-clcd-versatile.c
index 19ad864..e5d9bfc 100644
--- a/drivers/video/fbdev/amba-clcd-versatile.c
+++ b/drivers/video/fbdev/amba-clcd-versatile.c
@@ -526,8 +526,8 @@ int versatile_clcd_init_panel(struct clcd_fb *fb,
 	np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match,
 					     &clcd_id);
 	if (!np) {
-		dev_err(dev, "no Versatile syscon node\n");
-		return -ENODEV;
+		/* Vexpress does not have this */
+		return 0;
 	}
 	versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
 
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 9a28133..9b774f4 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -39,65 +39,54 @@ static void fname_crypt_complete(struct crypto_async_request *req, int res)
 static int fname_encrypt(struct inode *inode,
 			const struct qstr *iname, struct fscrypt_str *oname)
 {
-	u32 ciphertext_len;
 	struct skcipher_request *req = NULL;
 	DECLARE_FS_COMPLETION_RESULT(ecr);
 	struct fscrypt_info *ci = inode->i_crypt_info;
 	struct crypto_skcipher *tfm = ci->ci_ctfm;
 	int res = 0;
 	char iv[FS_CRYPTO_BLOCK_SIZE];
-	struct scatterlist src_sg, dst_sg;
+	struct scatterlist sg;
 	int padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK);
-	char *workbuf, buf[32], *alloc_buf = NULL;
-	unsigned lim;
+	unsigned int lim;
+	unsigned int cryptlen;
 
 	lim = inode->i_sb->s_cop->max_namelen(inode);
 	if (iname->len <= 0 || iname->len > lim)
 		return -EIO;
 
-	ciphertext_len = max(iname->len, (u32)FS_CRYPTO_BLOCK_SIZE);
-	ciphertext_len = round_up(ciphertext_len, padding);
-	ciphertext_len = min(ciphertext_len, lim);
+	/*
+	 * Copy the filename to the output buffer for encrypting in-place and
+	 * pad it with the needed number of NUL bytes.
+	 */
+	cryptlen = max_t(unsigned int, iname->len, FS_CRYPTO_BLOCK_SIZE);
+	cryptlen = round_up(cryptlen, padding);
+	cryptlen = min(cryptlen, lim);
+	memcpy(oname->name, iname->name, iname->len);
+	memset(oname->name + iname->len, 0, cryptlen - iname->len);
 
-	if (ciphertext_len <= sizeof(buf)) {
-		workbuf = buf;
-	} else {
-		alloc_buf = kmalloc(ciphertext_len, GFP_NOFS);
-		if (!alloc_buf)
-			return -ENOMEM;
-		workbuf = alloc_buf;
-	}
+	/* Initialize the IV */
+	memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);
 
-	/* Allocate request */
+	/* Set up the encryption request */
 	req = skcipher_request_alloc(tfm, GFP_NOFS);
 	if (!req) {
 		printk_ratelimited(KERN_ERR
-			"%s: crypto_request_alloc() failed\n", __func__);
-		kfree(alloc_buf);
+			"%s: skcipher_request_alloc() failed\n", __func__);
 		return -ENOMEM;
 	}
 	skcipher_request_set_callback(req,
 			CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
 			fname_crypt_complete, &ecr);
+	sg_init_one(&sg, oname->name, cryptlen);
+	skcipher_request_set_crypt(req, &sg, &sg, cryptlen, iv);
 
-	/* Copy the input */
-	memcpy(workbuf, iname->name, iname->len);
-	if (iname->len < ciphertext_len)
-		memset(workbuf + iname->len, 0, ciphertext_len - iname->len);
-
-	/* Initialize IV */
-	memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);
-
-	/* Create encryption request */
-	sg_init_one(&src_sg, workbuf, ciphertext_len);
-	sg_init_one(&dst_sg, oname->name, ciphertext_len);
-	skcipher_request_set_crypt(req, &src_sg, &dst_sg, ciphertext_len, iv);
+	/* Do the encryption */
 	res = crypto_skcipher_encrypt(req);
 	if (res == -EINPROGRESS || res == -EBUSY) {
+		/* Request is being completed asynchronously; wait for it */
 		wait_for_completion(&ecr.completion);
 		res = ecr.res;
 	}
-	kfree(alloc_buf);
 	skcipher_request_free(req);
 	if (res < 0) {
 		printk_ratelimited(KERN_ERR
@@ -105,7 +94,7 @@ static int fname_encrypt(struct inode *inode,
 		return res;
 	}
 
-	oname->len = ciphertext_len;
+	oname->len = cryptlen;
 	return 0;
 }
 
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 82f0285..67fb6d8 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -185,7 +185,7 @@ int get_crypt_info(struct inode *inode)
 	struct crypto_skcipher *ctfm;
 	const char *cipher_str;
 	int keysize;
-	u8 raw_key[FS_MAX_KEY_SIZE];
+	u8 *raw_key = NULL;
 	int res;
 
 	res = fscrypt_initialize();
@@ -238,6 +238,15 @@ int get_crypt_info(struct inode *inode)
 	if (res)
 		goto out;
 
+	/*
+	 * This cannot be a stack buffer because it is passed to the scatterlist
+	 * crypto API as part of key derivation.
+	 */
+	res = -ENOMEM;
+	raw_key = kmalloc(FS_MAX_KEY_SIZE, GFP_NOFS);
+	if (!raw_key)
+		goto out;
+
 	if (fscrypt_dummy_context_enabled(inode)) {
 		memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE);
 		goto got_key;
@@ -276,7 +285,8 @@ int get_crypt_info(struct inode *inode)
 	if (res)
 		goto out;
 
-	memzero_explicit(raw_key, sizeof(raw_key));
+	kzfree(raw_key);
+	raw_key = NULL;
 	if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) {
 		put_crypt_info(crypt_info);
 		goto retry;
@@ -287,7 +297,7 @@ int get_crypt_info(struct inode *inode)
 	if (res == -ENOKEY)
 		res = 0;
 	put_crypt_info(crypt_info);
-	memzero_explicit(raw_key, sizeof(raw_key));
+	kzfree(raw_key);
 	return res;
 }
 
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 78dae46..20ee0e4 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -235,6 +235,7 @@ struct ext4_io_submit {
 #define	EXT4_MAX_BLOCK_SIZE		65536
 #define EXT4_MIN_BLOCK_LOG_SIZE		10
 #define EXT4_MAX_BLOCK_LOG_SIZE		16
+#define EXT4_MAX_CLUSTER_LOG_SIZE	30
 #ifdef __KERNEL__
 # define EXT4_BLOCK_SIZE(s)		((s)->s_blocksize)
 #else
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 20da99d..52b0530 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3565,7 +3565,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	if (blocksize < EXT4_MIN_BLOCK_SIZE ||
 	    blocksize > EXT4_MAX_BLOCK_SIZE) {
 		ext4_msg(sb, KERN_ERR,
-		       "Unsupported filesystem blocksize %d", blocksize);
+		       "Unsupported filesystem blocksize %d (%d log_block_size)",
+			 blocksize, le32_to_cpu(es->s_log_block_size));
+		goto failed_mount;
+	}
+	if (le32_to_cpu(es->s_log_block_size) >
+	    (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
+		ext4_msg(sb, KERN_ERR,
+			 "Invalid log block size: %u",
+			 le32_to_cpu(es->s_log_block_size));
 		goto failed_mount;
 	}
 
@@ -3697,6 +3705,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 				 "block size (%d)", clustersize, blocksize);
 			goto failed_mount;
 		}
+		if (le32_to_cpu(es->s_log_cluster_size) >
+		    (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
+			ext4_msg(sb, KERN_ERR,
+				 "Invalid log cluster size: %u",
+				 le32_to_cpu(es->s_log_cluster_size));
+			goto failed_mount;
+		}
 		sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
 			le32_to_cpu(es->s_log_block_size);
 		sbi->s_clusters_per_group =
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 67cc9f7..95f4e51 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -331,6 +331,11 @@ const struct dentry_operations fuse_dentry_operations = {
 	.d_canonical_path = fuse_dentry_canonical_path,
 };
 
+const struct dentry_operations fuse_root_dentry_operations = {
+	.d_init		= fuse_dentry_init,
+	.d_release	= fuse_dentry_release,
+};
+
 int fuse_valid_type(int m)
 {
 	return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) ||
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index abc66a6..2401c5d 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1985,6 +1985,10 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
 {
 	struct inode *inode = page->mapping->host;
 
+	/* Haven't copied anything?  Skip zeroing, size extending, dirtying. */
+	if (!copied)
+		goto unlock;
+
 	if (!PageUptodate(page)) {
 		/* Zero any unwritten bytes at the end of the page */
 		size_t endoff = (pos + copied) & ~PAGE_MASK;
@@ -1995,6 +1999,8 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
 
 	fuse_write_update_size(inode, pos + copied);
 	set_page_dirty(page);
+
+unlock:
 	unlock_page(page);
 	put_page(page);
 
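Editor's note: the early-out above follows the ->write_end contract: generic_perform_write() may pass copied == 0 after a mid-copy fault, in which case the page contents are undefined and must not be zero-filled, size-extended, or dirtied. A condensed sketch of the resulting shape (helper name and simplified signature are hypothetical):

	static int example_write_end(struct page *page, loff_t pos,
				     unsigned copied)
	{
		if (copied) {
			/* commit: zero the tail, update i_size, dirty page */
		}
		/* always release the page, whether or not data was copied */
		unlock_page(page);
		put_page(page);
		return copied;
	}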
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 5f0a33f..6b30a12 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -695,6 +695,7 @@ static inline u64 get_node_id(struct inode *inode)
 extern const struct file_operations fuse_dev_operations;
 
 extern const struct dentry_operations fuse_dentry_operations;
+extern const struct dentry_operations fuse_root_dentry_operations;
 
 /**
  * Inode to nodeid comparison.
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 1714109..6fe6a88 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -1131,10 +1131,11 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
 
 	err = -ENOMEM;
 	root = fuse_get_root_inode(sb, d.rootmode);
+	sb->s_d_op = &fuse_root_dentry_operations;
 	root_dentry = d_make_root(root);
 	if (!root_dentry)
 		goto err_dev_free;
-	/* only now - we want root dentry with NULL ->d_op */
+	/* Root dentry doesn't have .d_revalidate */
 	sb->s_d_op = &fuse_dentry_operations;
 
 	init_req = fuse_request_alloc(0);
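Editor's note: the reordering above works because d_alloc() snapshots sb->s_d_op into each new dentry, so the value installed before d_make_root() applies only to the root dentry, and the value installed afterwards applies to everything else. In sketch form:

	sb->s_d_op = &fuse_root_dentry_operations;	/* no ->d_revalidate */
	root_dentry = d_make_root(root);		/* root snapshots these ops */
	sb->s_d_op = &fuse_dentry_operations;		/* all later dentries */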
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
index d484068..38887cc 100644
--- a/fs/orangefs/orangefs-debugfs.c
+++ b/fs/orangefs/orangefs-debugfs.c
@@ -114,6 +114,7 @@ static const struct seq_operations help_debug_ops = {
 };
 
 const struct file_operations debug_help_fops = {
+	.owner		= THIS_MODULE,
 	.open           = orangefs_debug_help_open,
 	.read           = seq_read,
 	.release        = seq_release,
@@ -121,6 +122,7 @@ const struct file_operations debug_help_fops = {
 };
 
 static const struct file_operations kernel_debug_fops = {
+	.owner		= THIS_MODULE,
 	.open           = orangefs_debug_open,
 	.read           = orangefs_debug_read,
 	.write		= orangefs_debug_write,
diff --git a/fs/xattr.c b/fs/xattr.c
index 3368659..2d13b4e 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -170,7 +170,7 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name,
 		const void *value, size_t size, int flags)
 {
 	struct inode *inode = dentry->d_inode;
-	int error = -EOPNOTSUPP;
+	int error = -EAGAIN;
 	int issec = !strncmp(name, XATTR_SECURITY_PREFIX,
 				   XATTR_SECURITY_PREFIX_LEN);
 
@@ -183,15 +183,21 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name,
 			security_inode_post_setxattr(dentry, name, value,
 						     size, flags);
 		}
-	} else if (issec) {
-		const char *suffix = name + XATTR_SECURITY_PREFIX_LEN;
-
+	} else {
 		if (unlikely(is_bad_inode(inode)))
 			return -EIO;
-		error = security_inode_setsecurity(inode, suffix, value,
-						   size, flags);
-		if (!error)
-			fsnotify_xattr(dentry);
+	}
+	if (error == -EAGAIN) {
+		error = -EOPNOTSUPP;
+
+		if (issec) {
+			const char *suffix = name + XATTR_SECURITY_PREFIX_LEN;
+
+			error = security_inode_setsecurity(inode, suffix, value,
+							   size, flags);
+			if (!error)
+				fsnotify_xattr(dentry);
+		}
 	}
 
 	return error;
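Editor's note: the rewrite above turns -EAGAIN into an internal "not handled yet" sentinel so the bad-inode check runs before the security fallback on every path. A minimal sketch of the pattern, mirroring the variables in the function above (the two example_* helpers are hypothetical stand-ins for the handler call):

	int error = -EAGAIN;		/* sentinel: no handler claimed it */

	if (example_has_handler(inode))
		error = example_call_handler(inode);	/* may succeed or fail */
	else if (unlikely(is_bad_inode(inode)))
		return -EIO;		/* now checked on the fallback path too */

	if (error == -EAGAIN) {
		error = -EOPNOTSUPP;	/* default if the fallback declines */
		if (issec)
			error = security_inode_setsecurity(inode, suffix,
							   value, size, flags);
	}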
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 1b949e0..c19700e 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -230,72 +230,62 @@ struct acpi_table_facs {
 /* Fields common to all versions of the FADT */
 
 struct acpi_table_fadt {
-	struct acpi_table_header header;	/* [V1] Common ACPI table header */
-	u32 facs;		/* [V1] 32-bit physical address of FACS */
-	u32 dsdt;		/* [V1] 32-bit physical address of DSDT */
-	u8 model;		/* [V1] System Interrupt Model (ACPI 1.0) - not used in ACPI 2.0+ */
-	u8 preferred_profile;	/* [V1] Conveys preferred power management profile to OSPM. */
-	u16 sci_interrupt;	/* [V1] System vector of SCI interrupt */
-	u32 smi_command;	/* [V1] 32-bit Port address of SMI command port */
-	u8 acpi_enable;		/* [V1] Value to write to SMI_CMD to enable ACPI */
-	u8 acpi_disable;	/* [V1] Value to write to SMI_CMD to disable ACPI */
-	u8 s4_bios_request;	/* [V1] Value to write to SMI_CMD to enter S4BIOS state */
-	u8 pstate_control;	/* [V1] Processor performance state control */
-	u32 pm1a_event_block;	/* [V1] 32-bit port address of Power Mgt 1a Event Reg Blk */
-	u32 pm1b_event_block;	/* [V1] 32-bit port address of Power Mgt 1b Event Reg Blk */
-	u32 pm1a_control_block;	/* [V1] 32-bit port address of Power Mgt 1a Control Reg Blk */
-	u32 pm1b_control_block;	/* [V1] 32-bit port address of Power Mgt 1b Control Reg Blk */
-	u32 pm2_control_block;	/* [V1] 32-bit port address of Power Mgt 2 Control Reg Blk */
-	u32 pm_timer_block;	/* [V1] 32-bit port address of Power Mgt Timer Ctrl Reg Blk */
-	u32 gpe0_block;		/* [V1] 32-bit port address of General Purpose Event 0 Reg Blk */
-	u32 gpe1_block;		/* [V1] 32-bit port address of General Purpose Event 1 Reg Blk */
-	u8 pm1_event_length;	/* [V1] Byte Length of ports at pm1x_event_block */
-	u8 pm1_control_length;	/* [V1] Byte Length of ports at pm1x_control_block */
-	u8 pm2_control_length;	/* [V1] Byte Length of ports at pm2_control_block */
-	u8 pm_timer_length;	/* [V1] Byte Length of ports at pm_timer_block */
-	u8 gpe0_block_length;	/* [V1] Byte Length of ports at gpe0_block */
-	u8 gpe1_block_length;	/* [V1] Byte Length of ports at gpe1_block */
-	u8 gpe1_base;		/* [V1] Offset in GPE number space where GPE1 events start */
-	u8 cst_control;		/* [V1] Support for the _CST object and C-States change notification */
-	u16 c2_latency;		/* [V1] Worst case HW latency to enter/exit C2 state */
-	u16 c3_latency;		/* [V1] Worst case HW latency to enter/exit C3 state */
-	u16 flush_size;		/* [V1] Processor memory cache line width, in bytes */
-	u16 flush_stride;	/* [V1] Number of flush strides that need to be read */
-	u8 duty_offset;		/* [V1] Processor duty cycle index in processor P_CNT reg */
-	u8 duty_width;		/* [V1] Processor duty cycle value bit width in P_CNT register */
-	u8 day_alarm;		/* [V1] Index to day-of-month alarm in RTC CMOS RAM */
-	u8 month_alarm;		/* [V1] Index to month-of-year alarm in RTC CMOS RAM */
-	u8 century;		/* [V1] Index to century in RTC CMOS RAM */
-	u16 boot_flags;		/* [V3] IA-PC Boot Architecture Flags (see below for individual flags) */
-	u8 reserved;		/* [V1] Reserved, must be zero */
-	u32 flags;		/* [V1] Miscellaneous flag bits (see below for individual flags) */
-	/* End of Version 1 FADT fields (ACPI 1.0) */
-
-	struct acpi_generic_address reset_register;	/* [V3] 64-bit address of the Reset register */
-	u8 reset_value;		/* [V3] Value to write to the reset_register port to reset the system */
-	u16 arm_boot_flags;	/* [V5] ARM-Specific Boot Flags (see below for individual flags) (ACPI 5.1) */
-	u8 minor_revision;	/* [V5] FADT Minor Revision (ACPI 5.1) */
-	u64 Xfacs;		/* [V3] 64-bit physical address of FACS */
-	u64 Xdsdt;		/* [V3] 64-bit physical address of DSDT */
-	struct acpi_generic_address xpm1a_event_block;	/* [V3] 64-bit Extended Power Mgt 1a Event Reg Blk address */
-	struct acpi_generic_address xpm1b_event_block;	/* [V3] 64-bit Extended Power Mgt 1b Event Reg Blk address */
-	struct acpi_generic_address xpm1a_control_block;	/* [V3] 64-bit Extended Power Mgt 1a Control Reg Blk address */
-	struct acpi_generic_address xpm1b_control_block;	/* [V3] 64-bit Extended Power Mgt 1b Control Reg Blk address */
-	struct acpi_generic_address xpm2_control_block;	/* [V3] 64-bit Extended Power Mgt 2 Control Reg Blk address */
-	struct acpi_generic_address xpm_timer_block;	/* [V3] 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */
-	struct acpi_generic_address xgpe0_block;	/* [V3] 64-bit Extended General Purpose Event 0 Reg Blk address */
-	struct acpi_generic_address xgpe1_block;	/* [V3] 64-bit Extended General Purpose Event 1 Reg Blk address */
-	/* End of Version 3 FADT fields (ACPI 2.0) */
-
-	struct acpi_generic_address sleep_control;	/* [V4] 64-bit Sleep Control register (ACPI 5.0) */
-	/* End of Version 4 FADT fields (ACPI 3.0 and ACPI 4.0) (Field was originally reserved in ACPI 3.0) */
-
-	struct acpi_generic_address sleep_status;	/* [V5] 64-bit Sleep Status register (ACPI 5.0) */
-	/* End of Version 5 FADT fields (ACPI 5.0) */
-
-	u64 hypervisor_id;	/* [V6] Hypervisor Vendor ID (ACPI 6.0) */
-	/* End of Version 6 FADT fields (ACPI 6.0) */
-
+	struct acpi_table_header header;	/* Common ACPI table header */
+	u32 facs;		/* 32-bit physical address of FACS */
+	u32 dsdt;		/* 32-bit physical address of DSDT */
+	u8 model;		/* System Interrupt Model (ACPI 1.0) - not used in ACPI 2.0+ */
+	u8 preferred_profile;	/* Conveys preferred power management profile to OSPM. */
+	u16 sci_interrupt;	/* System vector of SCI interrupt */
+	u32 smi_command;	/* 32-bit Port address of SMI command port */
+	u8 acpi_enable;		/* Value to write to SMI_CMD to enable ACPI */
+	u8 acpi_disable;	/* Value to write to SMI_CMD to disable ACPI */
+	u8 s4_bios_request;	/* Value to write to SMI_CMD to enter S4BIOS state */
+	u8 pstate_control;	/* Processor performance state control */
+	u32 pm1a_event_block;	/* 32-bit port address of Power Mgt 1a Event Reg Blk */
+	u32 pm1b_event_block;	/* 32-bit port address of Power Mgt 1b Event Reg Blk */
+	u32 pm1a_control_block;	/* 32-bit port address of Power Mgt 1a Control Reg Blk */
+	u32 pm1b_control_block;	/* 32-bit port address of Power Mgt 1b Control Reg Blk */
+	u32 pm2_control_block;	/* 32-bit port address of Power Mgt 2 Control Reg Blk */
+	u32 pm_timer_block;	/* 32-bit port address of Power Mgt Timer Ctrl Reg Blk */
+	u32 gpe0_block;		/* 32-bit port address of General Purpose Event 0 Reg Blk */
+	u32 gpe1_block;		/* 32-bit port address of General Purpose Event 1 Reg Blk */
+	u8 pm1_event_length;	/* Byte Length of ports at pm1x_event_block */
+	u8 pm1_control_length;	/* Byte Length of ports at pm1x_control_block */
+	u8 pm2_control_length;	/* Byte Length of ports at pm2_control_block */
+	u8 pm_timer_length;	/* Byte Length of ports at pm_timer_block */
+	u8 gpe0_block_length;	/* Byte Length of ports at gpe0_block */
+	u8 gpe1_block_length;	/* Byte Length of ports at gpe1_block */
+	u8 gpe1_base;		/* Offset in GPE number space where GPE1 events start */
+	u8 cst_control;		/* Support for the _CST object and C-States change notification */
+	u16 c2_latency;		/* Worst case HW latency to enter/exit C2 state */
+	u16 c3_latency;		/* Worst case HW latency to enter/exit C3 state */
+	u16 flush_size;		/* Processor memory cache line width, in bytes */
+	u16 flush_stride;	/* Number of flush strides that need to be read */
+	u8 duty_offset;		/* Processor duty cycle index in processor P_CNT reg */
+	u8 duty_width;		/* Processor duty cycle value bit width in P_CNT register */
+	u8 day_alarm;		/* Index to day-of-month alarm in RTC CMOS RAM */
+	u8 month_alarm;		/* Index to month-of-year alarm in RTC CMOS RAM */
+	u8 century;		/* Index to century in RTC CMOS RAM */
+	u16 boot_flags;		/* IA-PC Boot Architecture Flags (see below for individual flags) */
+	u8 reserved;		/* Reserved, must be zero */
+	u32 flags;		/* Miscellaneous flag bits (see below for individual flags) */
+	struct acpi_generic_address reset_register;	/* 64-bit address of the Reset register */
+	u8 reset_value;		/* Value to write to the reset_register port to reset the system */
+	u16 arm_boot_flags;	/* ARM-Specific Boot Flags (see below for individual flags) (ACPI 5.1) */
+	u8 minor_revision;	/* FADT Minor Revision (ACPI 5.1) */
+	u64 Xfacs;		/* 64-bit physical address of FACS */
+	u64 Xdsdt;		/* 64-bit physical address of DSDT */
+	struct acpi_generic_address xpm1a_event_block;	/* 64-bit Extended Power Mgt 1a Event Reg Blk address */
+	struct acpi_generic_address xpm1b_event_block;	/* 64-bit Extended Power Mgt 1b Event Reg Blk address */
+	struct acpi_generic_address xpm1a_control_block;	/* 64-bit Extended Power Mgt 1a Control Reg Blk address */
+	struct acpi_generic_address xpm1b_control_block;	/* 64-bit Extended Power Mgt 1b Control Reg Blk address */
+	struct acpi_generic_address xpm2_control_block;	/* 64-bit Extended Power Mgt 2 Control Reg Blk address */
+	struct acpi_generic_address xpm_timer_block;	/* 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */
+	struct acpi_generic_address xgpe0_block;	/* 64-bit Extended General Purpose Event 0 Reg Blk address */
+	struct acpi_generic_address xgpe1_block;	/* 64-bit Extended General Purpose Event 1 Reg Blk address */
+	struct acpi_generic_address sleep_control;	/* 64-bit Sleep Control register (ACPI 5.0) */
+	struct acpi_generic_address sleep_status;	/* 64-bit Sleep Status register (ACPI 5.0) */
+	u64 hypervisor_id;	/* Hypervisor Vendor ID (ACPI 6.0) */
 };
 
 /* Masks for FADT IA-PC Boot Architecture Flags (boot_flags) [Vx]=Introduced in this FADT revision */
@@ -311,8 +301,8 @@ struct acpi_table_fadt {
 
 /* Masks for FADT ARM Boot Architecture Flags (arm_boot_flags) ACPI 5.1 */
 
-#define ACPI_FADT_PSCI_COMPLIANT    (1)	/* 00: [V5] PSCI 0.2+ is implemented */
-#define ACPI_FADT_PSCI_USE_HVC      (1<<1)	/* 01: [V5] HVC must be used instead of SMC as the PSCI conduit */
+#define ACPI_FADT_PSCI_COMPLIANT    (1)	/* 00: [V5+] PSCI 0.2+ is implemented */
+#define ACPI_FADT_PSCI_USE_HVC      (1<<1)	/* 01: [V5+] HVC must be used instead of SMC as the PSCI conduit */
 
 /* Masks for FADT flags */
 
@@ -409,34 +399,20 @@ struct acpi_table_desc {
  * match the expected length. In other words, the length of the
  * FADT is the bottom line as to what the version really is.
  *
- * NOTE: There is no officialy released V2 of the FADT. This
- * version was used only for prototyping and testing during the
- * 32-bit to 64-bit transition. V3 was the first official 64-bit
- * version of the FADT.
- *
- * Update this list of defines when a new version of the FADT is
- * added to the ACPI specification. Note that the FADT version is
- * only incremented when new fields are appended to the existing
- * version. Therefore, the FADT version is competely independent
- * from the version of the ACPI specification where it is
- * defined.
- *
- * For reference, the various FADT lengths are as follows:
- *     FADT V1 size: 0x074      ACPI 1.0
- *     FADT V3 size: 0x0F4      ACPI 2.0
- *     FADT V4 size: 0x100      ACPI 3.0 and ACPI 4.0
- *     FADT V5 size: 0x10C      ACPI 5.0
- *     FADT V6 size: 0x114      ACPI 6.0
+ * For reference, the values below are as follows:
+ *     FADT V1 size: 0x074
+ *     FADT V2 size: 0x084
+ *     FADT V3 size: 0x0F4
+ *     FADT V4 size: 0x0F4
+ *     FADT V5 size: 0x10C
+ *     FADT V6 size: 0x114
  */
-#define ACPI_FADT_V1_SIZE       (u32) (ACPI_FADT_OFFSET (flags) + 4)	/* ACPI 1.0 */
-#define ACPI_FADT_V3_SIZE       (u32) (ACPI_FADT_OFFSET (sleep_control))	/* ACPI 2.0 */
-#define ACPI_FADT_V4_SIZE       (u32) (ACPI_FADT_OFFSET (sleep_status))	/* ACPI 3.0 and ACPI 4.0 */
-#define ACPI_FADT_V5_SIZE       (u32) (ACPI_FADT_OFFSET (hypervisor_id))	/* ACPI 5.0 */
-#define ACPI_FADT_V6_SIZE       (u32) (sizeof (struct acpi_table_fadt))	/* ACPI 6.0 */
+#define ACPI_FADT_V1_SIZE       (u32) (ACPI_FADT_OFFSET (flags) + 4)
+#define ACPI_FADT_V2_SIZE       (u32) (ACPI_FADT_OFFSET (minor_revision) + 1)
+#define ACPI_FADT_V3_SIZE       (u32) (ACPI_FADT_OFFSET (sleep_control))
+#define ACPI_FADT_V5_SIZE       (u32) (ACPI_FADT_OFFSET (hypervisor_id))
+#define ACPI_FADT_V6_SIZE       (u32) (sizeof (struct acpi_table_fadt))
 
-/* Update these when new FADT versions are added */
-
-#define ACPI_FADT_MAX_VERSION   6
 #define ACPI_FADT_CONFORMANCE   "ACPI 6.1 (FADT version 6)"
 
 #endif				/* __ACTBL_H__ */
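Editor's note: with ACPI_FADT_V2_SIZE now defined, a table's declared length can be mapped back to the FADT version it conforms to, which is how the comment above treats length as "the bottom line as to what the version really is". A hedged sketch (helper name hypothetical; note that V3 and V4 share a length in the table above):

	static u8 example_fadt_version_from_length(u32 length)
	{
		if (length >= ACPI_FADT_V6_SIZE)
			return 6;
		if (length >= ACPI_FADT_V5_SIZE)
			return 5;
		if (length >= ACPI_FADT_V3_SIZE)
			return 3;	/* V3 and V4 share this length */
		if (length >= ACPI_FADT_V2_SIZE)
			return 2;
		return 1;
	}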
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index a5d98d1..e861a24 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -191,6 +191,9 @@
 #ifndef __init
 #define __init
 #endif
+#ifndef __iomem
+#define __iomem
+#endif
 
 /* Host-dependent types and defines for user-space ACPICA */
 
diff --git a/include/linux/esoc_client.h b/include/linux/esoc_client.h
new file mode 100644
index 0000000..77a8b50
--- /dev/null
+++ b/include/linux/esoc_client.h
@@ -0,0 +1,52 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __ESOC_CLIENT_H_
+#define __ESOC_CLIENT_H_
+
+#include <linux/device.h>
+#include <linux/esoc_ctrl.h>
+#include <linux/notifier.h>
+
+/*
+ * struct esoc_desc: Describes an external soc
+ * @name: external soc name
+ * @priv: private data for external soc
+ */
+struct esoc_desc {
+	const char *name;
+	const char *link;
+	void *priv;
+};
+
+#ifdef CONFIG_ESOC_CLIENT
+/* Can return probe deferral */
+struct esoc_desc *devm_register_esoc_client(struct device *dev,
+							const char *name);
+void devm_unregister_esoc_client(struct device *dev,
+						struct esoc_desc *esoc_desc);
+int esoc_register_client_notifier(struct notifier_block *nb);
+#else
+static inline struct esoc_desc *devm_register_esoc_client(struct device *dev,
+							const char *name)
+{
+	return NULL;
+}
+static inline void devm_unregister_esoc_client(struct device *dev,
+						struct esoc_desc *esoc_desc)
+{
+}
+static inline int esoc_register_client_notifier(struct notifier_block *nb)
+{
+	return -EIO;
+}
+#endif
+#endif
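Editor's note: a short usage sketch for the client API above, from a hypothetical peripheral driver's probe(). Per the "Can return probe deferral" comment, this assumes deferral is reported via an ERR_PTR; the "mdm" client name is illustrative:

	static int example_probe(struct platform_device *pdev)
	{
		struct esoc_desc *desc;

		desc = devm_register_esoc_client(&pdev->dev, "mdm");
		if (IS_ERR(desc))
			return PTR_ERR(desc);	/* e.g. -EPROBE_DEFER */
		if (!desc)
			return -ENODEV;	/* stub path when !CONFIG_ESOC_CLIENT */
		/* desc->name and desc->link now describe the external SoC */
		return 0;
	}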
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 9b9f65d..e35e6de 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -22,7 +22,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			unsigned char *vec);
 extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 			 unsigned long new_addr, unsigned long old_end,
-			 pmd_t *old_pmd, pmd_t *new_pmd);
+			 pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			unsigned long addr, pgprot_t newprot,
 			int prot_numa);
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 9e49e13..7aebe23 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -150,7 +150,7 @@ static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
 {
 #if defined(CONFIG_NET_L3_MASTER_DEV)
 	if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
-	    ipv6_l3mdev_skb(IP6CB(skb)->flags))
+	    skb && ipv6_l3mdev_skb(IP6CB(skb)->flags))
 		return true;
 #endif
 	return false;
diff --git a/include/linux/msm-sps.h b/include/linux/msm-sps.h
new file mode 100644
index 0000000..4a9b8a8
--- /dev/null
+++ b/include/linux/msm-sps.h
@@ -0,0 +1,1639 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Smart-Peripheral-Switch (SPS) API. */
+
+#ifndef _SPS_H_
+#define _SPS_H_
+
+#include <linux/types.h>	/* u32 */
+
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) || defined(CONFIG_ARM_LPAE)
+
+/* Returns the upper 4 bits of a 36-bit physical address */
+#define SPS_GET_UPPER_ADDR(addr) ((addr & 0xF00000000ULL) >> 32)
+
+/* Returns the 36-bit physical address from a 32-bit address and
+ * flags word
+ */
+#define DESC_FULL_ADDR(flags, addr) ((((phys_addr_t)flags & 0xF) << 32) | addr)
+
+/* Returns the flags word, combining the flag bits with the upper
+ * 4 bits of a 36-bit physical address
+ */
+#define DESC_FLAG_WORD(flags, addr) (((addr & 0xF00000000ULL) >> 32) | flags)
+
+#else
+
+#define SPS_GET_UPPER_ADDR(addr) (0)
+#define DESC_FULL_ADDR(flags, addr) (addr)
+#define DESC_FLAG_WORD(flags, addr) (flags)
+
+#endif
+
+/* Returns the upper 4 bits of a 36-bit physical address from the
+ * flags word
+ */
+#define DESC_UPPER_ADDR(flags) ((flags & 0xF))
+
+/* Returns the lower 32 bits of a 36-bit physical address */
+#define SPS_GET_LOWER_ADDR(addr) ((u32)(addr & 0xFFFFFFFF))
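Editor's note: a brief sketch of how these macros compose on an LPAE build. The low 32 bits of a 36-bit descriptor address go in the addr field, the upper nibble rides in the flags word, and DESC_FULL_ADDR() reassembles them (values are arbitrary examples):

	phys_addr_t pa = 0xABCDEF012ULL;	/* example 36-bit address */
	u32 flag_bits  = 0x4000;		/* e.g. the EOT flag defined below */

	u32 desc_addr  = SPS_GET_LOWER_ADDR(pa);	/* 0xBCDEF012 */
	u32 desc_flags = DESC_FLAG_WORD(flag_bits, pa);	/* 0x4000 | 0xA */
	phys_addr_t back = DESC_FULL_ADDR(desc_flags, desc_addr); /* == pa */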
+
+/* SPS device handle indicating use of system memory */
+#define SPS_DEV_HANDLE_MEM       (~0x0ul>>1)
+
+/* SPS device handle indicating use of BAM-DMA */
+
+/* SPS device handle invalid value */
+#define SPS_DEV_HANDLE_INVALID   0
+
+/* BAM invalid IRQ value */
+#define SPS_IRQ_INVALID          0
+
+/* Invalid address value */
+#define SPS_ADDR_INVALID      (0xDEADBEEF)
+
+/* Invalid peripheral device enumeration class */
+#define SPS_CLASS_INVALID     ((unsigned long)-1)
+
+/*
+ * This value specifies different configurations for an SPS connection.
+ * A non-default value instructs the SPS driver to search for the configuration
+ * in the fixed connection mapping table.
+ */
+#define SPS_CONFIG_DEFAULT       0
+
+/*
+ * This value instructs the SPS driver to use the default BAM-DMA channel
+ * threshold
+ */
+#define SPS_DMA_THRESHOLD_DEFAULT   0
+
+/* Flag bits supported by SPS hardware for struct sps_iovec */
+#define SPS_IOVEC_FLAG_INT  0x8000  /* Generate interrupt */
+#define SPS_IOVEC_FLAG_EOT  0x4000  /* Generate end-of-transfer indication */
+#define SPS_IOVEC_FLAG_EOB  0x2000  /* Generate end-of-block indication */
+#define SPS_IOVEC_FLAG_NWD  0x1000  /* notify when done */
+#define SPS_IOVEC_FLAG_CMD  0x0800  /* command descriptor */
+#define SPS_IOVEC_FLAG_LOCK  0x0400  /* pipe lock */
+#define SPS_IOVEC_FLAG_UNLOCK  0x0200  /* pipe unlock */
+#define SPS_IOVEC_FLAG_IMME 0x0100  /* immediate command descriptor */
+#define SPS_IOVEC_FLAG_NO_SUBMIT 0x0020  /* Do not submit descriptor to HW */
+#define SPS_IOVEC_FLAG_DEFAULT   0x0010  /* Use driver default */
+
+/* Maximum descriptor/iovec size */
+#define SPS_IOVEC_MAX_SIZE   (32 * 1024 - 1)  /* 32K-1 bytes due to HW limit */
+
+/* BAM device options flags */
+
+/*
+ * BAM will be configured and enabled at boot.  Otherwise, BAM will be
+ * configured and enabled when first pipe connect occurs.
+ */
+#define SPS_BAM_OPT_ENABLE_AT_BOOT  1UL
+/* BAM IRQ is disabled */
+#define SPS_BAM_OPT_IRQ_DISABLED    (1UL << 1)
+/* BAM peripheral is a BAM-DMA */
+#define SPS_BAM_OPT_BAMDMA          (1UL << 2)
+/* BAM IRQ is registered for apps wakeup */
+#define SPS_BAM_OPT_IRQ_WAKEUP      (1UL << 3)
+/* Ignore external block pipe reset */
+#define SPS_BAM_NO_EXT_P_RST        (1UL << 4)
+/* Don't enable local clock gating */
+#define SPS_BAM_NO_LOCAL_CLK_GATING (1UL << 5)
+/* Don't enable writeback cancel*/
+#define SPS_BAM_CANCEL_WB           (1UL << 6)
+/* BAM uses SMMU */
+#define SPS_BAM_SMMU_EN             (1UL << 9)
+/* Confirm resource status before access BAM*/
+#define SPS_BAM_RES_CONFIRM         (1UL << 7)
+/* Hold memory for BAM DMUX */
+#define SPS_BAM_HOLD_MEM            (1UL << 8)
+/* Use cached write pointer */
+#define SPS_BAM_CACHED_WP           (1UL << 10)
+
+/* BAM device management flags */
+
+/* BAM global device control is managed remotely */
+#define SPS_BAM_MGR_DEVICE_REMOTE   1UL
+/* BAM device supports multiple execution environments */
+#define SPS_BAM_MGR_MULTI_EE        (1UL << 1)
+/* BAM pipes are *not* allocated locally */
+#define SPS_BAM_MGR_PIPE_NO_ALLOC   (1UL << 2)
+/* BAM pipes are *not* configured locally */
+#define SPS_BAM_MGR_PIPE_NO_CONFIG  (1UL << 3)
+/* BAM pipes are *not* controlled locally */
+#define SPS_BAM_MGR_PIPE_NO_CTRL    (1UL << 4)
+/* "Globbed" management properties */
+#define SPS_BAM_MGR_NONE            \
+	(SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_PIPE_NO_ALLOC | \
+	 SPS_BAM_MGR_PIPE_NO_CONFIG | SPS_BAM_MGR_PIPE_NO_CTRL)
+#define SPS_BAM_MGR_LOCAL           0
+#define SPS_BAM_MGR_LOCAL_SHARED    SPS_BAM_MGR_MULTI_EE
+#define SPS_BAM_MGR_REMOTE_SHARED   \
+	(SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_MULTI_EE | \
+	 SPS_BAM_MGR_PIPE_NO_ALLOC)
+#define SPS_BAM_MGR_ACCESS_MASK     SPS_BAM_MGR_NONE
+
+/*
+ * BAM security configuration
+ */
+#define SPS_BAM_NUM_EES             4
+#define SPS_BAM_SEC_DO_NOT_CONFIG   0
+#define SPS_BAM_SEC_DO_CONFIG       0x0A434553
+
+/* BAM pipe selection */
+#define SPS_BAM_PIPE(n)             (1UL << (n))
+
+/* This enum specifies the operational mode for an SPS connection */
+enum sps_mode {
+	SPS_MODE_SRC = 0,  /* end point is the source (producer) */
+	SPS_MODE_DEST,	   /* end point is the destination (consumer) */
+};
+
+
+/*
+ * This enum is a set of bit flag options for SPS connection.
+ * The enums should be OR'd together to create the option set
+ * for the SPS connection.
+ */
+enum sps_option {
+	/*
+	 * Options to enable specific SPS hardware interrupts.
+	 * These bit flags are also used to indicate interrupt source
+	 * for the SPS_EVENT_IRQ event.
+	 */
+	SPS_O_DESC_DONE = 0x00000001,  /* Descriptor processed */
+	SPS_O_INACTIVE  = 0x00000002,  /* Inactivity timeout */
+	SPS_O_WAKEUP    = 0x00000004,  /* Peripheral wake up */
+	SPS_O_OUT_OF_DESC = 0x00000008,/* Out of descriptors */
+	SPS_O_ERROR     = 0x00000010,  /* Error */
+	SPS_O_EOT       = 0x00000020,  /* End-of-transfer */
+	SPS_O_RST_ERROR = 0x00000040,  /* Pipe reset unsuccessful error */
+	SPS_O_HRESP_ERROR = 0x00000080,/* Erroneous HResponse by AHB master */
+
+	/* Options to enable hardware features */
+	SPS_O_STREAMING = 0x00010000,  /* Enable streaming mode (no EOT) */
+	/* Use MTI/SETPEND instead of BAM interrupt */
+	SPS_O_IRQ_MTI   = 0x00020000,
+	/* NWD bit written with EOT for BAM2BAM producer pipe */
+	SPS_O_WRITE_NWD   = 0x00040000,
+	/* EOT set after pipe SW offset advanced */
+	SPS_O_LATE_EOT   = 0x00080000,
+
+	/* Options to enable software features */
+	/* Do not disable a pipe during disconnection */
+	SPS_O_NO_DISABLE      = 0x00800000,
+	/* Transfer operation should be polled */
+	SPS_O_POLL      = 0x01000000,
+	/* Disable queuing of transfer events for the connection end point */
+	SPS_O_NO_Q      = 0x02000000,
+	SPS_O_FLOWOFF   = 0x04000000,  /* Graceful halt */
+	/* SPS_O_WAKEUP will be disabled after triggered */
+	SPS_O_WAKEUP_IS_ONESHOT = 0x08000000,
+	/**
+	 * Client must read each descriptor from the FIFO
+	 * using sps_get_iovec()
+	 */
+	SPS_O_ACK_TRANSFERS = 0x10000000,
+	/* Connection is automatically enabled */
+	SPS_O_AUTO_ENABLE = 0x20000000,
+	/* DISABLE endpoint synchronization for config/enable/disable */
+	SPS_O_NO_EP_SYNC = 0x40000000,
+	/* Allow partial polling during IRQ mode */
+	SPS_O_HYBRID = 0x80000000,
+};
+
+/**
+ * This enum specifies BAM DMA channel priority.  Clients should use
+ * SPS_DMA_PRI_DEFAULT unless a specific priority is required.
+ */
+enum sps_dma_priority {
+	SPS_DMA_PRI_DEFAULT = 0,
+	SPS_DMA_PRI_LOW,
+	SPS_DMA_PRI_MED,
+	SPS_DMA_PRI_HIGH,
+};
+
+/*
+ * This enum specifies the ownership of a connection resource.
+ * Remote or shared ownership is only possible/meaningful on the processor
+ * that controls resource.
+ */
+enum sps_owner {
+	SPS_OWNER_LOCAL = 0x1,	/* Resource is owned by local processor */
+	SPS_OWNER_REMOTE = 0x2,	/* Resource is owned by a satellite processor */
+};
+
+/* This enum indicates the event associated with a client event trigger */
+enum sps_event {
+	SPS_EVENT_INVALID = 0,
+
+	SPS_EVENT_EOT,		/* End-of-transfer */
+	SPS_EVENT_DESC_DONE,	/* Descriptor processed */
+	SPS_EVENT_OUT_OF_DESC,	/* Out of descriptors */
+	SPS_EVENT_WAKEUP,	/* Peripheral wake up */
+	SPS_EVENT_FLOWOFF,	/* Graceful halt (idle) */
+	SPS_EVENT_INACTIVE,	/* Inactivity timeout */
+	SPS_EVENT_ERROR,	/* Error */
+	SPS_EVENT_RST_ERROR,    /* Pipe Reset unsuccessful */
+	SPS_EVENT_HRESP_ERROR,  /* Erroneous HResponse by AHB master */
+	SPS_EVENT_MAX,
+};
+
+/*
+ * This enum specifies the event trigger mode and is an argument for the
+ * sps_register_event() function.
+ */
+enum sps_trigger {
+	/* Trigger with payload for callback */
+	SPS_TRIGGER_CALLBACK = 0,
+	/* Trigger without payload for wait or poll */
+	SPS_TRIGGER_WAIT,
+};
+
+/*
+ * This enum indicates the desired halting mechanism and is an argument for the
+ * sps_flow_off() function
+ */
+enum sps_flow_off {
+	SPS_FLOWOFF_FORCED = 0,	/* Force hardware into halt state */
+	/* Allow hardware to empty pipe before halting */
+	SPS_FLOWOFF_GRACEFUL,
+};
+
+/*
+ * This enum indicates the target memory heap and is an argument for the
+ * sps_mem_alloc() function.
+ */
+enum sps_mem {
+	SPS_MEM_LOCAL = 0,  /* SPS subsystem local (pipe) memory */
+	SPS_MEM_UC,	    /* Microcontroller (ARM7) local memory */
+};
+
+/*
+ * This enum indicates a timer control operation and is an argument for the
+ * sps_timer_ctrl() function.
+ */
+enum sps_timer_op {
+	SPS_TIMER_OP_CONFIG = 0,
+	SPS_TIMER_OP_RESET,
+/*   SPS_TIMER_OP_START,   Not supported by hardware yet */
+/*   SPS_TIMER_OP_STOP,    Not supported by hardware yet */
+	SPS_TIMER_OP_READ,
+};
+
+/*
+ * This enum indicates the inactivity timer operating mode and is an
+ * argument for the sps_timer_ctrl() function.
+ */
+enum sps_timer_mode {
+	SPS_TIMER_MODE_ONESHOT = 0,
+/*   SPS_TIMER_MODE_PERIODIC,    Not supported by hardware yet */
+};
+
+/* This enum indicates the cases in which the BAM user's callback is invoked */
+enum sps_callback_case {
+	SPS_CALLBACK_BAM_ERROR_IRQ = 1,     /* BAM ERROR IRQ */
+	SPS_CALLBACK_BAM_HRESP_ERR_IRQ,	    /* Erroneous HResponse */
+	SPS_CALLBACK_BAM_TIMER_IRQ,	    /* Inactivity timer */
+	SPS_CALLBACK_BAM_RES_REQ,	    /* Request resource */
+	SPS_CALLBACK_BAM_RES_REL,	    /* Release resource */
+	SPS_CALLBACK_BAM_POLL,	            /* To poll each pipe */
+};
+
+/*
+ * This enum indicates the command type in a command element
+ */
+enum sps_command_type {
+	SPS_WRITE_COMMAND = 0,
+	SPS_READ_COMMAND,
+};
+
+/**
+ * struct msm_sps_platform_data - SPS Platform specific data.
+ * @bamdma_restricted_pipes - Bitmask of pipes restricted from local use.
+ *
+ */
+struct msm_sps_platform_data {
+	u32 bamdma_restricted_pipes;
+};
+
+/**
+ * This data type corresponds to the native I/O vector (BAM descriptor)
+ * supported by SPS hardware
+ *
+ * @addr - Buffer physical address.
+ * @size - Buffer size in bytes.
+ * @flags - Flag bitmask (see SPS_IOVEC_FLAG_ #defines).
+ *
+ */
+struct sps_iovec {
+	u32 addr;
+	u32 size:16;
+	u32 flags:16;
+};
+
+/**
+ * This data type corresponds to the native Command Element
+ * supported by SPS hardware
+ *
+ * @addr - register address.
+ * @command - command type.
+ * @data - for write command: content to be written into peripheral register.
+ *         for read command: dest addr to write peripheral register value to.
+ * @mask - register mask.
+ * @reserved - for future usage.
+ *
+ */
+struct sps_command_element {
+	u32 addr:24;
+	u32 command:8;
+	u32 data;
+	u32 mask;
+	u32 reserved;
+};
+
+/*
+ * BAM device's security configuration
+ */
+struct sps_bam_pipe_sec_config_props {
+	u32 pipe_mask;
+	u32 vmid;
+};
+
+struct sps_bam_sec_config_props {
+	/* Per-EE configuration - This is a pipe bit mask for each EE */
+	struct sps_bam_pipe_sec_config_props ees[SPS_BAM_NUM_EES];
+};
+
+/**
+ * This struct defines a BAM device. The client must memset() this struct to
+ * zero before writing device information.  Any field left zero instructs
+ * the SPS driver to use general defaults or hardware/BIOS-supplied values.
+ *
+ *
+ * @options - See SPS_BAM_OPT_* bit flag.
+ * @phys_addr - BAM base physical address (not peripheral address).
+ * @virt_addr - BAM base virtual address.
+ * @virt_size - For virtual mapping.
+ * @irq - IRQ enum for use in ISR vector install.
+ * @num_pipes - number of pipes. Can be read from hardware.
+ * @summing_threshold - BAM event threshold.
+ *
+ * @periph_class - Peripheral device enumeration class.
+ * @periph_dev_id - Peripheral global device ID.
+ * @periph_phys_addr - Peripheral base physical address, for BAM-DMA only.
+ * @periph_virt_addr - Peripheral base virtual address.
+ * @periph_virt_size - Size for virtual mapping.
+ *
+ * @callback - callback function for BAM user.
+ * @user - pointer to user data.
+ *
+ * @event_threshold - Pipe event threshold.
+ * @desc_size - Size (bytes) of descriptor FIFO.
+ * @data_size - Size (bytes) of data FIFO.
+ * @desc_mem_id - Heap ID for default descriptor FIFO allocations.
+ * @data_mem_id - Heap ID for default data FIFO allocations.
+ *
+ * @manage - BAM device management flags (see SPS_BAM_MGR_*).
+ * @restricted_pipes - Bitmask of pipes restricted from local use.
+ * @ee - Local execution environment index.
+ *
+ * @irq_gen_addr - MTI interrupt generation address. This configuration only
+ * applies to BAM rev 1 and 2 hardware. MTIs are only supported on BAMs when
+ * global config is controlled by a remote processor.
+ * NOTE: This address must correspond to the MTI associated with the "irq" IRQ
+ * enum specified above.
+ *
+ * @sec_config - must be set to SPS_BAM_SEC_DO_CONFIG to perform BAM security
+ * configuration.  Only the processor that manages the BAM is allowed to
+ * perform the configuration. The global (top-level) BAM interrupt will be
+ * assigned to the EE of the processor that manages the BAM.
+ *
+ * @p_sec_config_props - BAM device's security configuration
+ *
+ */
+struct sps_bam_props {
+
+	/* BAM device properties. */
+
+	u32 options;
+	phys_addr_t phys_addr;
+	void *virt_addr;
+	u32 virt_size;
+	u32 irq;
+	u32 num_pipes;
+	u32 summing_threshold;
+
+	/* Peripheral device properties */
+
+	u32 periph_class;
+	u32 periph_dev_id;
+	phys_addr_t periph_phys_addr;
+	void *periph_virt_addr;
+	u32 periph_virt_size;
+
+	/* Connection pipe parameter defaults. */
+
+	u32 event_threshold;
+	u32 desc_size;
+	u32 data_size;
+	u32 desc_mem_id;
+	u32 data_mem_id;
+
+	/* Feedback to BAM user */
+	void (*callback)(enum sps_callback_case, void *);
+	void *user;
+
+	/* Security properties */
+
+	u32 manage;
+	u32 restricted_pipes;
+	u32 ee;
+
+	/* Log Level property */
+	u32 ipc_loglevel;
+
+	/* BAM MTI interrupt generation */
+
+	u32 irq_gen_addr;
+
+	/* Security configuration properties */
+
+	u32 sec_config;
+	struct sps_bam_sec_config_props *p_sec_config_props;
+
+	/* Logging control */
+
+	bool constrained_logging;
+	u32 logging_number;
+};
+
+/**
+ *  This struct specifies memory buffer properties.
+ *
+ * @base - Buffer virtual address.
+ * @phys_base - Buffer physical address.
+ * @size - Specifies buffer size (or maximum size).
+ * @min_size - If non-zero, specifies buffer minimum size.
+ *
+ */
+struct sps_mem_buffer {
+	void *base;
+	phys_addr_t phys_base;
+	unsigned long iova;
+	u32 size;
+	u32 min_size;
+};
+
+/**
+ * This struct defines a connection's end point and is used as the argument
+ * for the sps_connect(), sps_get_config(), and sps_set_config() functions.
+ * For system mode pipe, use SPS_DEV_HANDLE_MEM for the end point that
+ * corresponds to system memory.
+ *
+ * The client can force SPS to reserve a specific pipe on a BAM.
+ * If the pipe is in use, the sps_connect/set_config() will fail.
+ *
+ * @source - Source BAM.
+ * @src_pipe_index - BAM pipe index, 0 to 30.
+ * @destination - Destination BAM.
+ * @dest_pipe_index - BAM pipe index, 0 to 30.
+ *
+ * @mode - specifies which end (source or destination) of the connection will
+ * be controlled/referenced by the client.
+ *
+ * @config - This value is for future use and should be set to
+ * SPS_CONFIG_DEFAULT or left as default from sps_get_config().
+ *
+ * @options - OR'd connection end point options (see SPS_O defines).
+ *
+ * WARNING: The memory provided should be physically contiguous and non-cached.
+ * The user can use one of the following:
+ * 1. sps_alloc_mem() - allocated from pipe-memory.
+ * 2. dma_alloc_coherent() - allocate coherent DMA memory.
+ * 3. dma_map_single() - for using memory allocated by kmalloc().
+ *
+ * @desc - Descriptor FIFO.
+ * @data - Data FIFO (BAM-to-BAM mode only).
+ *
+ * @event_thresh - Pipe event threshold or derivative.
+ * @lock_group - The lock group this pipe belongs to.
+ *
+ * @sps_reserved - Reserved word - client must not modify.
+ *
+ */
+struct sps_connect {
+	unsigned long source;
+	unsigned long source_iova;
+	u32 src_pipe_index;
+	unsigned long destination;
+	unsigned long dest_iova;
+	u32 dest_pipe_index;
+
+	enum sps_mode mode;
+
+	u32 config;
+
+	enum sps_option options;
+
+	struct sps_mem_buffer desc;
+	struct sps_mem_buffer data;
+
+	u32 event_thresh;
+
+	u32 lock_group;
+
+	/* SETPEND/MTI interrupt generation parameters */
+
+	u32 irq_gen_addr;
+	u32 irq_gen_data;
+
+	u32 sps_reserved;
+
+};
+
+/**
+ * This struct defines a satellite connection's end point.  The client of the
+ * SPS driver on the satellite processor must call sps_get_config() to
+ * initialize a struct sps_connect, then copy the values from the struct
+ * sps_satellite to the struct sps_connect before making the sps_connect()
+ * call to the satellite SPS driver.
+ *
+ */
+struct sps_satellite {
+	/**
+	 * These values must be copied to either the source or destination
+	 * corresponding values in the connect struct.
+	 */
+	phys_addr_t dev;
+	u32 pipe_index;
+
+	/**
+	 * These values must be copied to the corresponding values in the
+	 * connect struct
+	 */
+	u32 config;
+	enum sps_option options;
+
+};
+
+/**
+ * This struct defines parameters for allocation of a BAM DMA channel. The
+ * client must memset() this struct to zero before writing allocation
+ * information.  Any field left zero instructs the SPS driver to use
+ * defaults or "don't care".
+ *
+ * @dev - Associated BAM device handle, or SPS_DEV_HANDLE_DMA.
+ *
+ * @src_owner - Source owner processor ID.
+ * @dest_owner - Destination owner processor ID.
+ *
+ */
+struct sps_alloc_dma_chan {
+	unsigned long dev;
+
+	/* BAM DMA channel configuration parameters */
+
+	u32 threshold;
+	enum sps_dma_priority priority;
+
+	/**
+	 * Owner IDs are global host processor identifiers used by the system
+	 * SROT when establishing execution environments.
+	 */
+	u32 src_owner;
+	u32 dest_owner;
+
+};
+
+/**
+ * This struct defines parameters for an allocated BAM DMA channel.
+ *
+ * @dev - BAM DMA device handle.
+ * @dest_pipe_index - Destination/input/write pipe index.
+ * @src_pipe_index - Source/output/read pipe index.
+ *
+ */
+struct sps_dma_chan {
+	unsigned long dev;
+	u32 dest_pipe_index;
+	u32 src_pipe_index;
+};
+
+/**
+ * This struct is the payload passed when triggering a callback event
+ * object registered for an SPS connection end point.
+ *
+ * @user - Pointer registered with sps_register_event().
+ *
+ * @event_id - Which event.
+ *
+ * @iovec - The associated I/O vector. If the end point is a system-mode
+ * producer, the size will reflect the actual number of bytes written to the
+ * buffer by the pipe. NOTE: If this I/O vector was part of a set submitted to
+ * sps_transfer(), then the vector array itself will be updated with all of
+ * the actual counts.
+ *
+ * @user - Pointer registered with the transfer.
+ *
+ */
+struct sps_event_notify {
+	void *user;
+
+	enum sps_event event_id;
+
+	/* Data associated with the event */
+
+	union {
+		/* Data for SPS_EVENT_IRQ */
+		struct {
+			u32 mask;
+		} irq;
+
+		/* Data for SPS_EVENT_EOT or SPS_EVENT_DESC_DONE */
+
+		struct {
+			struct sps_iovec iovec;
+			void *user;
+		} transfer;
+
+		/* Data for SPS_EVENT_ERROR */
+
+		struct {
+			u32 status;
+		} err;
+
+	} data;
+};
+
+/**
+ * This struct defines event registration parameters and is used as the
+ * argument for the sps_register_event() function.
+ *
+ * @options - Event options that will trigger the event object.
+ * @mode - Event trigger mode.
+ *
+ * @xfer_done - a pointer to a completion object. NULL if not in use.
+ *
+ * @callback - a callback to call on completion. NULL if not in use.
+ *
+ * @user - User pointer that will be provided in event callback data.
+ *
+ */
+struct sps_register_event {
+	enum sps_option options;
+	enum sps_trigger mode;
+	struct completion *xfer_done;
+	void (*callback)(struct sps_event_notify *notify);
+	void *user;
+};
+
+/**
+ * This struct defines a system memory transfer's parameters and is used as the
+ * argument for the sps_transfer() function.
+ *
+ * @iovec_phys - Physical address of I/O vectors buffer.
+ * @iovec - Pointer to I/O vectors buffer.
+ * @iovec_count - Number of I/O vectors.
+ * @user - User pointer passed in callback event.
+ *
+ */
+struct sps_transfer {
+	phys_addr_t iovec_phys;
+	struct sps_iovec *iovec;
+	u32 iovec_count;
+	void *user;
+};
+
+/**
+ * This struct defines timer control operation parameters and is used as an
+ * argument for the sps_timer_ctrl() function.
+ *
+ * @op - Timer control operation.
+ * @timeout_msec - Inactivity timeout (msec).
+ *
+ */
+struct sps_timer_ctrl {
+	enum sps_timer_op op;
+
+	/**
+	 * The following configuration parameters must be set when the timer
+	 * control operation is SPS_TIMER_OP_CONFIG.
+	 */
+	enum sps_timer_mode mode;
+	u32 timeout_msec;
+};
+
+/**
+ * This struct defines a timer control operation result and is used as an
+ * argument for the sps_timer_ctrl() function.
+ */
+struct sps_timer_result {
+	u32 current_timer;
+};
+
+
+/*----------------------------------------------------------------------------
+ * Functions specific to sps interface
+ * ---------------------------------------------------------------------------
+ */
+struct sps_pipe;	/* Forward declaration */
+
+#ifdef CONFIG_SPS
+/**
+ * Register a BAM device
+ *
+ * This function registers a BAM device with the SPS driver. For each
+ * peripheral that includes a BAM, the peripheral driver must register
+ * the BAM with the SPS driver.
+ *
+ * The peripheral driver must remain attached to the SPS driver until
+ * the BAM is deregistered. Otherwise, the system may attempt to unload
+ * the SPS driver and the BAM registrations would be lost.
+ *
+ * @bam_props - Pointer to struct for BAM device properties.
+ *
+ * @dev_handle - Device handle will be written to this location (output).
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_register_bam_device(const struct sps_bam_props *bam_props,
+			    unsigned long *dev_handle);
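Editor's note: a hedged sketch of the registration call from a peripheral driver, assuming the BAM registers were already ioremap()ed and the IRQ obtained from platform resources (all values illustrative):

	struct sps_bam_props props;
	unsigned long bam_handle;
	int rc;

	memset(&props, 0, sizeof(props));	/* zero => driver defaults */
	props.phys_addr = 0xf9a04000;		/* example BAM register base */
	props.virt_addr = bam_va;		/* from ioremap() */
	props.irq = bam_irq;
	props.summing_threshold = 0x10;		/* example event threshold */

	rc = sps_register_bam_device(&props, &bam_handle);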
+
+/**
+ * Deregister a BAM device
+ *
+ * This function deregisters a BAM device from the SPS driver. The peripheral
+ * driver should deregister a BAM when the peripheral driver is shut down or
+ * when BAM use should be disabled.
+ *
+ * A BAM cannot be deregistered if any of its pipes is in an active connection.
+ *
+ * When all BAMs have been deregistered, the system is free to unload the
+ * SPS driver.
+ *
+ * @dev_handle - BAM device handle.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_deregister_bam_device(unsigned long dev_handle);
+
+/**
+ * Allocate client state context
+ *
+ * This function allocates and initializes a client state context struct.
+ *
+ * @return pointer to client state context
+ *
+ */
+struct sps_pipe *sps_alloc_endpoint(void);
+
+/**
+ * Free client state context
+ *
+ * This function de-initializes and frees a client state context struct.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_free_endpoint(struct sps_pipe *h);
+
+/**
+ * Get the configuration parameters for an SPS connection end point
+ *
+ * This function retrieves the configuration parameters for an SPS connection
+ * end point.
+ * This function may be called before the end point is connected (before
+ * sps_connect is called). This allows the client to specify parameters before
+ * the connection is established.
+ *
+ * The client must call this function to fill its struct sps_connect
+ * struct before modifying values and passing the struct to sps_set_config().
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @config - Pointer to buffer for the end point's configuration parameters.
+ * Must not be NULL.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_get_config(struct sps_pipe *h, struct sps_connect *config);
+
+/**
+ * Allocate memory from the SPS Pipe-Memory.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @mem - memory type - N/A.
+ *
+ * @mem_buffer - Pointer to struct for allocated memory properties.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_alloc_mem(struct sps_pipe *h, enum sps_mem mem,
+		  struct sps_mem_buffer *mem_buffer);
+
+/**
+ * Free memory from the SPS Pipe-Memory.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @mem_buffer - Pointer to struct for allocated memory properties.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_free_mem(struct sps_pipe *h, struct sps_mem_buffer *mem_buffer);
+
+/**
+ * Connect an SPS connection end point
+ *
+ * This function creates a connection between two SPS peripherals or between
+ * an SPS peripheral and the local host processor (via system memory, end
+ * point SPS_DEV_HANDLE_MEM). Establishing the connection includes
+ * initialization of the SPS hardware and allocation of any other connection
+ * resources (buffer memory, etc.).
+ *
+ * This function requires the client to specify both the source and
+ * destination end points of the SPS connection. However, the handle
+ * returned applies only to the end point of the connection that the client
+ * controls. The end point under control must be specified by the
+ * enum sps_mode mode argument, either SPS_MODE_SRC, SPS_MODE_DEST, or
+ * SPS_MODE_CTL. Note that SPS_MODE_CTL is only supported for I/O
+ * accelerator connections, and only a limited set of control operations are
+ * allowed (TBD).
+ *
+ * For a connection involving system memory
+ * (SPS_DEV_HANDLE_MEM), the peripheral end point must be
+ * specified. For example, SPS_MODE_SRC must be specified for a
+ * BAM-to-system connection, since the BAM pipe is the data
+ * producer.
+ *
+ * For a specific peripheral-to-peripheral connection, there may be more than
+ * one required configuration. For example, there might be high-performance
+ * and low-power configurations for a connection between the two peripherals.
+ * The config argument allows the client to specify different configurations,
+ * which may require different system resource allocations and hardware
+ * initialization.
+ *
+ * A client is allowed to create one and only one connection for its
+ * struct sps_pipe. The handle is used to identify the connection end point
+ * in subsequent SPS driver calls. A specific connection source or
+ * destination end point can be associated with one and only one
+ * struct sps_pipe.
+ *
+ * The client must establish an open device handle to the SPS. To do so, the
+ * client must attach to the SPS driver and open the SPS device by calling
+ * the following functions.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @connect - Pointer to connection parameters
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_connect(struct sps_pipe *h, struct sps_connect *connect);
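Editor's note: a hedged sketch of a memory-to-peripheral (system mode) connection built from the calls documented above; the pipe index and options are illustrative:

	struct sps_pipe *pipe = sps_alloc_endpoint();
	struct sps_connect cfg;
	int rc;

	rc = sps_get_config(pipe, &cfg);	/* start from driver defaults */
	cfg.source          = SPS_DEV_HANDLE_MEM;	/* data comes from memory */
	cfg.destination     = bam_handle;	/* from sps_register_bam_device() */
	cfg.dest_pipe_index = 4;		/* example pipe */
	cfg.mode            = SPS_MODE_DEST;	/* client controls the BAM end */
	cfg.options         = SPS_O_EOT;
	/* cfg.desc must be physically contiguous, non-cached memory (see above) */
	rc = sps_connect(pipe, &cfg);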
+
+/**
+ * Disconnect an SPS connection end point
+ *
+ * This function disconnects an SPS connection end point.
+ * The SPS hardware associated with that end point will be disabled.
+ * For a connection involving system memory (SPS_DEV_HANDLE_MEM), all
+ * connection resources are deallocated. For a peripheral-to-peripheral
+ * connection, the resources associated with the connection will not be
+ * deallocated until both end points are closed.
+ *
+ * The client must call sps_connect() for the handle before calling
+ * this function.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_disconnect(struct sps_pipe *h);
+
+/**
+ * Register an event object for an SPS connection end point
+ *
+ * This function registers a callback event object for an SPS connection end
+ * point. The registered event object will be triggered for the set of
+ * events specified in reg->options that are enabled for the end point.
+ *
+ * There can only be one registered event object for each event. If an event
+ * object is already registered for an event, it will be replaced. If
+ * reg->event handle is NULL, then any registered event object for the
+ * event will be deregistered. Option bits in reg->options not associated
+ * with events are ignored.
+ *
+ * The client must call sps_connect() for the handle before calling
+ * this function.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @reg - Pointer to event registration parameters
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_register_event(struct sps_pipe *h, struct sps_register_event *reg);
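Editor's note: a hedged sketch of registering an end-of-transfer callback on a connected pipe (names prefixed example_ are illustrative):

	static void example_eot_cb(struct sps_event_notify *notify)
	{
		/* for a system-mode producer, size is the actual bytes written */
		pr_debug("EOT, %u bytes\n", notify->data.transfer.iovec.size);
	}

	struct sps_register_event ev = {
		.options  = SPS_O_EOT,
		.mode     = SPS_TRIGGER_CALLBACK,
		.callback = example_eot_cb,
	};
	int rc = sps_register_event(pipe, &ev);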
+
+/**
+ * Perform a single DMA transfer on an SPS connection end point
+ *
+ * This function submits a DMA transfer request consisting of a single buffer
+ * for an SPS connection end point associated with a peripheral-to/from-memory
+ * connection. The request will be submitted immediately to hardware if the
+ * hardware is idle (data flow off, no other pending transfers). Otherwise, it
+ * will be queued for later handling in the SPS driver work loop.
+ *
+ * The data buffer must be DMA ready. The client is responsible for ensuring
+ * physically contiguous memory, cache maintenance, and memory barriers. For
+ * more information, see Appendix A.
+ *
+ * The client must not modify the data buffer until the completion indication is
+ * received.
+ *
+ * This function cannot be used if transfer queuing is disabled (see option
+ * SPS_O_NO_Q). The client must set the SPS_O_EOT option to receive a callback
+ * event trigger when the transfer is complete. The SPS driver will ensure the
+ * appropriate flags in the I/O vectors are set to generate the completion
+ * indication.
+ *
+ * The return value from this function may indicate that an error occurred.
+ * Possible causes include invalid arguments.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @addr - Physical address of buffer to transfer.
+ *
+ * WARNING: The memory provided should be physically contiguous and
+ * non-cached.
+ *
+ * The user can use one of the following:
+ * 1. sps_alloc_mem() - allocated from pipe-memory.
+ * 2. dma_alloc_coherent() - allocate DMA memory.
+ * 3. dma_map_single() for memory allocated by kmalloc().
+ *
+ * @size - Size in bytes of buffer to transfer
+ *
+ * @user - User pointer that will be returned to user as part of
+ *  event payload
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_transfer_one(struct sps_pipe *h, phys_addr_t addr, u32 size,
+		     void *user, u32 flags);
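Editor's note: a hedged usage sketch, submitting one DMA-ready buffer that was kmalloc()ed and then mapped with dma_map_single(), with EOT requested so a registered callback fires. It assumes dma_addr_t is usable as the physical address on these targets; variable names are illustrative:

	dma_addr_t pa = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	int rc;

	if (dma_mapping_error(dev, pa))
		return -ENOMEM;
	rc = sps_transfer_one(pipe, pa, len, NULL, SPS_IOVEC_FLAG_EOT);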
+
+/**
+ * Read event queue for an SPS connection end point
+ *
+ * This function reads event queue for an SPS connection end point.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @event - pointer to client's event data buffer
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_get_event(struct sps_pipe *h, struct sps_event_notify *event);
+
+/**
+ * Get processed I/O vector (completed transfers)
+ *
+ * This function fetches the next processed I/O vector.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @iovec - Pointer to I/O vector struct (output).
+ * This struct will be zeroed if there are no more processed I/O vectors.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_get_iovec(struct sps_pipe *h, struct sps_iovec *iovec);
+
+/**
+ * Enable an SPS connection end point
+ *
+ * This function enables an SPS connection end point.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_flow_on(struct sps_pipe *h);
+
+/**
+ * Disable an SPS connection end point
+ *
+ * This function disables an SPS connection end point.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @mode - Desired mode for disabling pipe data flow
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_flow_off(struct sps_pipe *h, enum sps_flow_off mode);
+
+/**
+ * Perform a Multiple DMA transfer on an SPS connection end point
+ *
+ * This function submits a DMA transfer request for an SPS connection end point
+ * associated with a peripheral-to/from-memory connection. The request will be
+ * submitted immediately to hardware if the hardware is idle (data flow off, no
+ * other pending transfers). Otherwise, it will be queued for later handling in
+ * the SPS driver work loop.
+ *
+ * The data buffers referenced by the I/O vectors must be DMA ready.
+ * The client is responsible for ensuring physically contiguous memory,
+ * any cache maintenance, and memory barrier. For more information,
+ * see Appendix A.
+ *
+ * The I/O vectors must specify physical addresses for the referenced buffers.
+ *
+ * The client must not modify the data buffers referenced by I/O vectors until
+ * the completion indication is received.
+ *
+ * If transfer queuing is disabled (see option SPS_O_NO_Q), the client is
+ * responsible for setting the appropriate flags in the I/O vectors to generate
+ * the completion indication. Also, the client is responsible for enabling the
+ * appropriate connection callback event options for completion indication (see
+ * sps_connect(), sps_set_config()).
+ *
+ * If transfer queuing is enabled, the client must set the SPS_O_EOT option to
+ * receive a callback event trigger when the transfer is complete. The SPS
+ * driver will ensure the appropriate flags in the I/O vectors are set to
+ * generate the completion indication. The client must not set any flags in the
+ * I/O vectors, as this may cause the SPS driver to become out of sync with the
+ * hardware.
+ *
+ * The return value from this function may indicate that an error occurred.
+ * Possible causes include invalid arguments. If transfer queuing is disabled,
+ * an error will occur if the pipe is already processing a transfer.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @transfer - Pointer to transfer parameter struct
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_transfer(struct sps_pipe *h, struct sps_transfer *transfer);
+
+/**
+ * Determine whether an SPS connection end point FIFO is empty
+ *
+ * This function returns the empty state of an SPS connection end point.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @empty - pointer to client's empty status word (boolean)
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_is_pipe_empty(struct sps_pipe *h, u32 *empty);
+
+/**
+ * Reset an SPS BAM device
+ *
+ * This function resets an SPS BAM device.
+ *
+ * @dev - device handle for the BAM
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_device_reset(unsigned long dev);
+
+/**
+ * Set the configuration parameters for an SPS connection end point
+ *
+ * This function sets the configuration parameters for an SPS connection
+ * end point. This function may be called before the end point is connected
+ * (before sps_connect is called). This allows the client to specify
+ * parameters before the connection is established. The client is allowed
+ * to pre-allocate resources and override driver defaults.
+ *
+ * The client must call sps_get_config() to fill its struct sps_connect
+ * struct before modifying values and passing the struct to this function.
+ * Only those parameters that differ from the current configuration will
+ * be processed.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @config - Pointer to the end point's new configuration parameters.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_set_config(struct sps_pipe *h, struct sps_connect *config);
+
+/**
+ * Set ownership of an SPS connection end point
+ *
+ * This function sets the ownership of an SPS connection end point to
+ * either local (default) or non-local. This function is used to
+ * retrieve the struct sps_connect data that must be used by a
+ * satellite processor when calling sps_connect().
+ *
+ * Non-local ownership is only possible/meaningful on the processor
+ * that controls resource allocations (apps processor). Setting ownership
+ * to non-local on a satellite processor will fail.
+ *
+ * Setting ownership from non-local to local will succeed only if the
+ * owning satellite processor has properly brought the end point to
+ * an idle condition.
+ *
+ * This function will succeed if the connection end point is already in
+ * the specified ownership state.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @owner - New ownership of the connection end point
+ *
+ * @connect - Pointer to buffer for satellite processor connect data.
+ *  Can be NULL to avoid retrieving the connect data. Will be ignored
+ *  if the end point ownership is set to local.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_set_owner(struct sps_pipe *h, enum sps_owner owner,
+		  struct sps_satellite *connect);
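+
+/*
+ * Sketch of handing a pipe to a satellite processor and capturing the
+ * connect data it needs. SPS_OWNER_REMOTE as the non-local owner value
+ * and the IPC helper name are assumptions for illustration:
+ *
+ *	struct sps_satellite sat;
+ *
+ *	if (!sps_set_owner(pipe, SPS_OWNER_REMOTE, &sat))
+ *		send_connect_data_to_satellite(&sat);
+ */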
+
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+/**
+ * Allocate a BAM DMA channel
+ *
+ * This function allocates a BAM DMA channel. A "BAM DMA" is a special
+ * DMA peripheral with a BAM front end. The DMA peripheral acts as a conduit
+ * for data to flow into a consumer pipe and then out of a producer pipe.
+ * Its primary purpose is to serve as a path for interprocessor communication
+ * that allows each processor to control and protect its own memory space.
+ *
+ * @alloc - Pointer to struct for BAM DMA channel allocation properties.
+ *
+ * @chan - Allocated channel information will be written to this
+ *  location (output).
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_alloc_dma_chan(const struct sps_alloc_dma_chan *alloc,
+		       struct sps_dma_chan *chan);
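+
+/*
+ * Sketch of the allocate/use/free life cycle. The ".dev" member of
+ * struct sps_alloc_dma_chan and the helper that wires the returned
+ * pipe indices into sps_connect() calls are assumptions:
+ *
+ *	struct sps_alloc_dma_chan alloc = { .dev = sps_dma_get_bam_handle() };
+ *	struct sps_dma_chan chan;
+ *
+ *	if (!sps_alloc_dma_chan(&alloc, &chan)) {
+ *		connect_consumer_and_producer_pipes(&chan);
+ *		sps_free_dma_chan(&chan);
+ *	}
+ */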
+
+/**
+ * Free a BAM DMA channel
+ *
+ * This function frees a BAM DMA channel.
+ *
+ * @chan - Pointer to information for channel to free
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_free_dma_chan(struct sps_dma_chan *chan);
+
+/**
+ * Get the BAM handle for BAM-DMA.
+ *
+ * The BAM handle should be used as the source/destination in sps_connect().
+ *
+ * @return handle on success, zero on error
+ *
+ */
+unsigned long sps_dma_get_bam_handle(void);
+
+/**
+ * Free the BAM handle for BAM-DMA.
+ *
+ */
+void sps_dma_free_bam_handle(unsigned long h);
+#else
+static inline int sps_alloc_dma_chan(const struct sps_alloc_dma_chan *alloc,
+		       struct sps_dma_chan *chan)
+{
+	return -EPERM;
+}
+
+static inline int sps_free_dma_chan(struct sps_dma_chan *chan)
+{
+	return -EPERM;
+}
+
+static inline unsigned long sps_dma_get_bam_handle(void)
+{
+	return 0;
+}
+
+static inline void sps_dma_free_bam_handle(unsigned long h)
+{
+}
+#endif
+
+/**
+ * Get number of free transfer entries for an SPS connection end point
+ *
+ * This function returns the number of free transfer entries for an
+ * SPS connection end point.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @count - pointer to count status
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_get_free_count(struct sps_pipe *h, u32 *count);
+
+/**
+ * Perform timer control
+ *
+ * This function performs timer control operations.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @timer_ctrl - Pointer to timer control specification
+ *
+ * @timer_result - Pointer to buffer for timer operation result.
+ *  This argument can be NULL if no result is expected for the operation.
+ *  If non-NULL, the current timer value will always be provided.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_timer_ctrl(struct sps_pipe *h,
+		   struct sps_timer_ctrl *timer_ctrl,
+		   struct sps_timer_result *timer_result);
+
+/**
+ * Find the handle of a BAM device based on the physical address
+ *
+ * This function finds a BAM device in the BAM registration list that
+ * matches the specified physical address, and returns its handle.
+ *
+ * @phys_addr - physical address of the BAM
+ *
+ * @handle - device handle of the BAM (output)
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_phy2h(phys_addr_t phys_addr, unsigned long *handle);
+
+/**
+ * Set up a descriptor/data FIFO for a BAM-to-BAM connection
+ *
+ * @mem_buffer - Pointer to struct for allocated memory properties.
+ *
+ * @addr - address of FIFO
+ *
+ * @size - FIFO size
+ *
+ * @use_offset - use address offset instead of absolute address
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_setup_bam2bam_fifo(struct sps_mem_buffer *mem_buffer,
+		  u32 addr, u32 size, int use_offset);
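+
+/*
+ * Sketch: preparing the descriptor FIFO of a BAM-to-BAM connection
+ * before sps_connect(). Treating ".desc" as the descriptor-FIFO
+ * member of struct sps_connect, and the "fifo_addr" and 0x100 size
+ * values, are assumptions for illustration:
+ *
+ *	struct sps_connect config;
+ *
+ *	sps_get_config(pipe, &config);
+ *	sps_setup_bam2bam_fifo(&config.desc, fifo_addr, 0x100, 1);
+ */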
+
+/**
+ * Get the number of unused descriptors in the descriptor FIFO
+ * of a pipe
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @desc_num - number of unused descriptors
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_get_unused_desc_num(struct sps_pipe *h, u32 *desc_num);
+
+/**
+ * Get the debug info of BAM registers and descriptor FIFOs
+ *
+ * @dev - BAM device handle
+ *
+ * @option - debugging option
+ *
+ * @para - parameter used for an option (such as pipe combination)
+ *
+ * @tb_sel - testbus selection
+ *
+ * @desc_sel - selection of descriptors
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_get_bam_debug_info(unsigned long dev, u32 option, u32 para,
+		u32 tb_sel, u32 desc_sel);
+
+/**
+ * Vote for or relinquish BAM DMA clock
+ *
+ * @clk_on - true to turn the clock on, false to turn it off
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_ctrl_bam_dma_clk(bool clk_on);
+
+/*
+ * sps_pipe_reset - reset a pipe of a BAM.
+ * @dev:	BAM device handle
+ * @pipe:	pipe index
+ *
+ * This function resets a pipe of a BAM.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int sps_pipe_reset(unsigned long dev, u32 pipe);
+
+/*
+ * sps_pipe_disable - disable a pipe of a BAM.
+ * @dev:	BAM device handle
+ * @pipe:	pipe index
+ *
+ * This function disables a pipe of a BAM.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int sps_pipe_disable(unsigned long dev, u32 pipe);
+
+/*
+ * sps_pipe_pending_desc - check for a pending descriptor.
+ * @dev:	BAM device handle
+ * @pipe:	pipe index
+ * @pending:	indicates whether there is any pending descriptor.
+ *
+ * This function checks if a pipe of a BAM has any pending descriptor.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int sps_pipe_pending_desc(unsigned long dev, u32 pipe, bool *pending);
+
+/*
+ * sps_bam_process_irq - process IRQ of a BAM.
+ * @dev:	BAM device handle
+ *
+ * This function processes any pending IRQ of a BAM.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int sps_bam_process_irq(unsigned long dev);
+
+/*
+ * sps_get_bam_addr - get address info of a BAM.
+ * @dev:	BAM device handle
+ * @base:	beginning address
+ * @size:	address range size
+ *
+ * This function returns the address info of a BAM.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int sps_get_bam_addr(unsigned long dev, phys_addr_t *base,
+				u32 *size);
+
+/*
+ * sps_pipe_inject_zlt - inject a zero-length transfer (ZLT) with EOT.
+ * @dev:	BAM device handle
+ * @pipe_index:	pipe index
+ *
+ * This function injects a ZLT with EOT for a pipe of a BAM.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int sps_pipe_inject_zlt(unsigned long dev, u32 pipe_index);
+#else
+static inline int sps_register_bam_device(const struct sps_bam_props
+			*bam_props, unsigned long *dev_handle)
+{
+	return -EPERM;
+}
+
+static inline int sps_deregister_bam_device(unsigned long dev_handle)
+{
+	return -EPERM;
+}
+
+static inline struct sps_pipe *sps_alloc_endpoint(void)
+{
+	return NULL;
+}
+
+static inline int sps_free_endpoint(struct sps_pipe *h)
+{
+	return -EPERM;
+}
+
+static inline int sps_get_config(struct sps_pipe *h, struct sps_connect *config)
+{
+	return -EPERM;
+}
+
+static inline int sps_alloc_mem(struct sps_pipe *h, enum sps_mem mem,
+		  struct sps_mem_buffer *mem_buffer)
+{
+	return -EPERM;
+}
+
+static inline int sps_free_mem(struct sps_pipe *h,
+				struct sps_mem_buffer *mem_buffer)
+{
+	return -EPERM;
+}
+
+static inline int sps_connect(struct sps_pipe *h, struct sps_connect *connect)
+{
+	return -EPERM;
+}
+
+static inline int sps_disconnect(struct sps_pipe *h)
+{
+	return -EPERM;
+}
+
+static inline int sps_register_event(struct sps_pipe *h,
+					struct sps_register_event *reg)
+{
+	return -EPERM;
+}
+
+static inline int sps_transfer_one(struct sps_pipe *h, phys_addr_t addr,
+					u32 size, void *user, u32 flags)
+{
+	return -EPERM;
+}
+
+static inline int sps_get_event(struct sps_pipe *h,
+				struct sps_event_notify *event)
+{
+	return -EPERM;
+}
+
+static inline int sps_get_iovec(struct sps_pipe *h, struct sps_iovec *iovec)
+{
+	return -EPERM;
+}
+
+static inline int sps_flow_on(struct sps_pipe *h)
+{
+	return -EPERM;
+}
+
+static inline int sps_flow_off(struct sps_pipe *h, enum sps_flow_off mode)
+{
+	return -EPERM;
+}
+
+static inline int sps_transfer(struct sps_pipe *h,
+				struct sps_transfer *transfer)
+{
+	return -EPERM;
+}
+
+static inline int sps_is_pipe_empty(struct sps_pipe *h, u32 *empty)
+{
+	return -EPERM;
+}
+
+static inline int sps_device_reset(unsigned long dev)
+{
+	return -EPERM;
+}
+
+static inline int sps_set_config(struct sps_pipe *h, struct sps_connect *config)
+{
+	return -EPERM;
+}
+
+static inline int sps_set_owner(struct sps_pipe *h, enum sps_owner owner,
+		  struct sps_satellite *connect)
+{
+	return -EPERM;
+}
+
+static inline int sps_get_free_count(struct sps_pipe *h, u32 *count)
+{
+	return -EPERM;
+}
+
+static inline int sps_alloc_dma_chan(const struct sps_alloc_dma_chan *alloc,
+		       struct sps_dma_chan *chan)
+{
+	return -EPERM;
+}
+
+static inline int sps_free_dma_chan(struct sps_dma_chan *chan)
+{
+	return -EPERM;
+}
+
+static inline unsigned long sps_dma_get_bam_handle(void)
+{
+	return 0;
+}
+
+static inline void sps_dma_free_bam_handle(unsigned long h)
+{
+}
+
+static inline int sps_timer_ctrl(struct sps_pipe *h,
+		   struct sps_timer_ctrl *timer_ctrl,
+		   struct sps_timer_result *timer_result)
+{
+	return -EPERM;
+}
+
+static inline int sps_phy2h(phys_addr_t phys_addr, unsigned long *handle)
+{
+	return -EPERM;
+}
+
+static inline int sps_setup_bam2bam_fifo(struct sps_mem_buffer *mem_buffer,
+		  u32 addr, u32 size, int use_offset)
+{
+	return -EPERM;
+}
+
+static inline int sps_get_unused_desc_num(struct sps_pipe *h, u32 *desc_num)
+{
+	return -EPERM;
+}
+
+static inline int sps_get_bam_debug_info(unsigned long dev, u32 option,
+		u32 para, u32 tb_sel, u32 desc_sel)
+{
+	return -EPERM;
+}
+
+static inline int sps_ctrl_bam_dma_clk(bool clk_on)
+{
+	return -EPERM;
+}
+
+static inline int sps_pipe_reset(unsigned long dev, u32 pipe)
+{
+	return -EPERM;
+}
+
+static inline int sps_pipe_disable(unsigned long dev, u32 pipe)
+{
+	return -EPERM;
+}
+
+static inline int sps_pipe_pending_desc(unsigned long dev, u32 pipe,
+					bool *pending)
+{
+	return -EPERM;
+}
+
+static inline int sps_bam_process_irq(unsigned long dev)
+{
+	return -EPERM;
+}
+
+static inline int sps_get_bam_addr(unsigned long dev, phys_addr_t *base,
+				u32 *size)
+{
+	return -EPERM;
+}
+
+static inline int sps_pipe_inject_zlt(unsigned long dev, u32 pipe_index)
+{
+	return -EPERM;
+}
+#endif
+
+#endif /* _SPS_H_ */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e7d343f..6762e21 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3356,6 +3356,21 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 bool is_skb_forwardable(const struct net_device *dev,
 			const struct sk_buff *skb);
 
+static __always_inline int ____dev_forward_skb(struct net_device *dev,
+					       struct sk_buff *skb)
+{
+	if (skb_orphan_frags(skb, GFP_ATOMIC) ||
+	    unlikely(!is_skb_forwardable(dev, skb))) {
+		atomic_long_inc(&dev->rx_dropped);
+		kfree_skb(skb);
+		return NET_RX_DROP;
+	}
+
+	skb_scrub_packet(skb, true);
+	skb->priority = 0;
+	return 0;
+}
+
 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
 
 extern int		netdev_budget;
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index ab02a45..e5d1934 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -25,6 +25,7 @@ struct svc_xprt_ops {
 	void		(*xpo_detach)(struct svc_xprt *);
 	void		(*xpo_free)(struct svc_xprt *);
 	int		(*xpo_secure_port)(struct svc_rqst *);
+	void		(*xpo_kill_temp_xprt)(struct svc_xprt *);
 };
 
 struct svc_xprt_class {
diff --git a/include/net/ip.h b/include/net/ip.h
index 55cdaac..b043c7d 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -47,8 +47,7 @@ struct inet_skb_parm {
 #define IPSKB_REROUTED		BIT(4)
 #define IPSKB_DOREDIRECT	BIT(5)
 #define IPSKB_FRAG_PMTU		BIT(6)
-#define IPSKB_FRAG_SEGS		BIT(7)
-#define IPSKB_L3SLAVE		BIT(8)
+#define IPSKB_L3SLAVE		BIT(7)
 
 	u16			frag_max_size;
 };
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 20ed969..1b1cf33 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -146,6 +146,7 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
 {
 	int pkt_len, err;
 
+	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
 	pkt_len = skb->len - skb_inner_network_offset(skb);
 	err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);
 	if (unlikely(net_xmit_eval(err)))
diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h
index 4988146..1723a67 100644
--- a/include/net/netfilter/nf_conntrack_labels.h
+++ b/include/net/netfilter/nf_conntrack_labels.h
@@ -30,8 +30,7 @@ static inline struct nf_conn_labels *nf_ct_labels_ext_add(struct nf_conn *ct)
 	if (net->ct.labels_used == 0)
 		return NULL;
 
-	return nf_ct_ext_add_length(ct, NF_CT_EXT_LABELS,
-				    sizeof(struct nf_conn_labels), GFP_ATOMIC);
+	return nf_ct_ext_add(ct, NF_CT_EXT_LABELS, GFP_ATOMIC);
 #else
 	return NULL;
 #endif
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 5031e07..d79d1e9 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -145,7 +145,7 @@ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
 	return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE;
 }
 
-unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest);
+int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest);
 unsigned int nft_parse_register(const struct nlattr *attr);
 int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg);
 
@@ -542,7 +542,8 @@ void *nft_set_elem_init(const struct nft_set *set,
 			const struct nft_set_ext_tmpl *tmpl,
 			const u32 *key, const u32 *data,
 			u64 timeout, gfp_t gfp);
-void nft_set_elem_destroy(const struct nft_set *set, void *elem);
+void nft_set_elem_destroy(const struct nft_set *set, void *elem,
+			  bool destroy_expr);
 
 /**
  *	struct nft_set_gc_batch_head - nf_tables set garbage collection batch
@@ -693,7 +694,6 @@ static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
 {
 	int err;
 
-	__module_get(src->ops->type->owner);
 	if (src->ops->clone) {
 		dst->ops = src->ops;
 		err = src->ops->clone(dst, src);
@@ -702,6 +702,8 @@ static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
 	} else {
 		memcpy(dst, src, src->ops->size);
 	}
+
+	__module_get(src->ops->type->owner);
 	return 0;
 }
 
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 87a7f42..31acc3f 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -152,7 +152,7 @@ void sctp_unhash_endpoint(struct sctp_endpoint *);
 struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *,
 			     struct sctphdr *, struct sctp_association **,
 			     struct sctp_transport **);
-void sctp_err_finish(struct sock *, struct sctp_association *);
+void sctp_err_finish(struct sock *, struct sctp_transport *);
 void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
 			   struct sctp_transport *t, __u32 pmtu);
 void sctp_icmp_redirect(struct sock *, struct sctp_transport *,
diff --git a/include/net/sock.h b/include/net/sock.h
index 73c6b00..92b2697 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1596,11 +1596,11 @@ static inline void sock_put(struct sock *sk)
 void sock_gen_put(struct sock *sk);
 
 int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
-		     unsigned int trim_cap);
+		     unsigned int trim_cap, bool refcounted);
 static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
 				 const int nested)
 {
-	return __sk_receive_skb(sk, skb, nested, 1);
+	return __sk_receive_skb(sk, skb, nested, 1, true);
 }
 
 static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 46450d0..2700f92 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -806,7 +806,7 @@ static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
 {
 #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
 	if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
-	    ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
+	    skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
 		return true;
 #endif
 	return false;
@@ -1221,6 +1221,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
 
 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
+int tcp_filter(struct sock *sk, struct sk_buff *skb);
 
 #undef STATE_TRACE
 
diff --git a/include/soc/qcom/subsystem_restart.h b/include/soc/qcom/subsystem_restart.h
index 7d46c73..a32de45 100644
--- a/include/soc/qcom/subsystem_restart.h
+++ b/include/soc/qcom/subsystem_restart.h
@@ -113,6 +113,7 @@ extern int subsystem_crashed(const char *name);
 
 extern void *subsystem_get(const char *name);
 extern void *subsystem_get_with_fwname(const char *name, const char *fw_name);
+extern int subsystem_set_fwname(const char *name, const char *fw_name);
 extern void subsystem_put(void *subsystem);
 
 extern struct subsys_device *subsys_register(struct subsys_desc *desc);
@@ -157,6 +158,11 @@ static inline void *subsystem_get_with_fwname(const char *name,
 	return NULL;
 }
 
+static inline int subsystem_set_fwname(const char *name,
+				const char *fw_name) {
+	return 0;
+}
+
 static inline void subsystem_put(void *subsystem) { }
 
 static inline
diff --git a/include/soc/qcom/sysmon.h b/include/soc/qcom/sysmon.h
new file mode 100644
index 0000000..56860db
--- /dev/null
+++ b/include/soc/qcom/sysmon.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MSM_SYSMON_H
+#define __MSM_SYSMON_H
+
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+
+/**
+ * enum ssctl_ssr_event_enum_type - Subsystem notification type.
+ */
+enum ssctl_ssr_event_enum_type {
+	SSCTL_SSR_EVENT_ENUM_TYPE_MIN_ENUM_VAL = -2147483647,
+	SSCTL_SSR_EVENT_BEFORE_POWERUP = 0,
+	SSCTL_SSR_EVENT_AFTER_POWERUP = 1,
+	SSCTL_SSR_EVENT_BEFORE_SHUTDOWN = 2,
+	SSCTL_SSR_EVENT_AFTER_SHUTDOWN = 3,
+	SSCTL_SSR_EVENT_ENUM_TYPE_MAX_ENUM_VAL = 2147483647
+};
+
+/**
+ * enum ssctl_ssr_event_driven_enum_type - Subsystem shutdown type.
+ */
+enum ssctl_ssr_event_driven_enum_type {
+	SSCTL_SSR_EVENT_DRIVEN_ENUM_TYPE_MIN_ENUM_VAL = -2147483647,
+	SSCTL_SSR_EVENT_FORCED = 0,
+	SSCTL_SSR_EVENT_GRACEFUL = 1,
+	SSCTL_SSR_EVENT_DRIVEN_ENUM_TYPE_MAX_ENUM_VAL = 2147483647
+};
+
+#if defined(CONFIG_MSM_SYSMON_COMM) || defined(CONFIG_MSM_SYSMON_GLINK_COMM)
+extern int sysmon_send_event(struct subsys_desc *dest_desc,
+			struct subsys_desc *event_desc,
+			enum subsys_notif_type notif);
+extern int sysmon_send_event_no_qmi(struct subsys_desc *dest_desc,
+				struct subsys_desc *event_desc,
+				enum subsys_notif_type notif);
+extern int sysmon_get_reason(struct subsys_desc *dest_desc, char *buf,
+				size_t len);
+extern int sysmon_get_reason_no_qmi(struct subsys_desc *dest_desc,
+				char *buf, size_t len);
+extern int sysmon_send_shutdown(struct subsys_desc *dest_desc);
+extern int sysmon_send_shutdown_no_qmi(struct subsys_desc *dest_desc);
+extern int sysmon_notifier_register(struct subsys_desc *desc);
+extern void sysmon_notifier_unregister(struct subsys_desc *desc);
+#else
+static inline int sysmon_send_event(struct subsys_desc *dest_desc,
+					struct subsys_desc *event_desc,
+					enum subsys_notif_type notif)
+{
+	return 0;
+}
+static inline int sysmon_send_event_no_qmi(struct subsys_desc *dest_desc,
+						struct subsys_desc *event_desc,
+						enum subsys_notif_type notif)
+{
+	return 0;
+}
+static inline int sysmon_get_reason(struct subsys_desc *dest_desc,
+					char *buf, size_t len)
+{
+	return 0;
+}
+static inline int sysmon_get_reason_no_qmi(struct subsys_desc *dest_desc,
+						char *buf, size_t len)
+{
+	return 0;
+}
+static inline int sysmon_send_shutdown(struct subsys_desc *dest_desc)
+{
+	return 0;
+}
+static inline int sysmon_send_shutdown_no_qmi(struct subsys_desc *dest_desc)
+{
+	return 0;
+}
+static inline int sysmon_notifier_register(struct subsys_desc *desc)
+{
+	return 0;
+}
+static inline void sysmon_notifier_unregister(struct subsys_desc *desc)
+{
+}
+#endif
+
+#if defined(CONFIG_MSM_SYSMON_GLINK_COMM)
+extern int sysmon_glink_register(struct subsys_desc *desc);
+extern void sysmon_glink_unregister(struct subsys_desc *desc);
+#else
+static inline int sysmon_glink_register(struct subsys_desc *desc)
+{
+	return 0;
+}
+static inline void sysmon_glink_unregister(struct subsys_desc *desc)
+{
+}
+#endif
+#endif
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 8f09a32..f8e6b3b 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -117,6 +117,7 @@
 header-y += elf.h
 header-y += errno.h
 header-y += errqueue.h
+header-y += esoc_ctrl.h
 header-y += ethtool.h
 header-y += eventpoll.h
 header-y += fadvise.h
diff --git a/include/uapi/linux/atm_zatm.h b/include/uapi/linux/atm_zatm.h
index 5cd4d4d..9c9c6ad 100644
--- a/include/uapi/linux/atm_zatm.h
+++ b/include/uapi/linux/atm_zatm.h
@@ -14,7 +14,6 @@
 
 #include <linux/atmapi.h>
 #include <linux/atmioc.h>
-#include <linux/time.h>
 
 #define ZATM_GETPOOL	_IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc)
 						/* get pool statistics */
diff --git a/include/uapi/linux/bpqether.h b/include/uapi/linux/bpqether.h
index a6c35e1..05865ed 100644
--- a/include/uapi/linux/bpqether.h
+++ b/include/uapi/linux/bpqether.h
@@ -5,9 +5,7 @@
  * 	Defines for the BPQETHER pseudo device driver
  */
 
-#ifndef __LINUX_IF_ETHER_H
 #include <linux/if_ether.h>
-#endif
 
 #define SIOCSBPQETHOPT		(SIOCDEVPRIVATE+0)	/* reserved */
 #define SIOCSBPQETHADDR		(SIOCDEVPRIVATE+1)
diff --git a/include/uapi/linux/esoc_ctrl.h b/include/uapi/linux/esoc_ctrl.h
new file mode 100644
index 0000000..1b17e1c
--- /dev/null
+++ b/include/uapi/linux/esoc_ctrl.h
@@ -0,0 +1,75 @@
+#ifndef _UAPI_ESOC_CTRL_H_
+#define _UAPI_ESOC_CTRL_H_
+
+#include <linux/types.h>
+
+#define ESOC_CODE		0xCC
+
+#define ESOC_CMD_EXE		_IOW(ESOC_CODE, 1, __u32)
+#define ESOC_WAIT_FOR_REQ	_IOR(ESOC_CODE, 2, __u32)
+#define ESOC_NOTIFY		_IOW(ESOC_CODE, 3, __u32)
+#define ESOC_GET_STATUS		_IOR(ESOC_CODE, 4, __u32)
+#define ESOC_WAIT_FOR_CRASH	_IOR(ESOC_CODE, 6, __u32)
+#define ESOC_REG_REQ_ENG	_IO(ESOC_CODE, 7)
+#define ESOC_REG_CMD_ENG	_IO(ESOC_CODE, 8)
+
+/* Link types for communication with external SOCs */
+#define HSIC		"HSIC"
+#define HSICPCIe	"HSIC+PCIe"
+#define PCIe		"PCIe"
+
+enum esoc_evt {
+	ESOC_RUN_STATE = 0x1,
+	ESOC_UNEXPECTED_RESET,
+	ESOC_ERR_FATAL,
+	ESOC_IN_DEBUG,
+	ESOC_REQ_ENG_ON,
+	ESOC_REQ_ENG_OFF,
+	ESOC_CMD_ENG_ON,
+	ESOC_CMD_ENG_OFF,
+	ESOC_INVALID_STATE,
+};
+
+enum esoc_cmd {
+	ESOC_PWR_ON = 1,
+	ESOC_PWR_OFF,
+	ESOC_FORCE_PWR_OFF,
+	ESOC_RESET,
+	ESOC_PREPARE_DEBUG,
+	ESOC_EXE_DEBUG,
+	ESOC_EXIT_DEBUG,
+};
+
+enum esoc_notify {
+	ESOC_IMG_XFER_DONE = 1,
+	ESOC_BOOT_DONE,
+	ESOC_BOOT_FAIL,
+	ESOC_IMG_XFER_RETRY,
+	ESOC_IMG_XFER_FAIL,
+	ESOC_UPGRADE_AVAILABLE,
+	ESOC_DEBUG_DONE,
+	ESOC_DEBUG_FAIL,
+	ESOC_PRIMARY_CRASH,
+	ESOC_PRIMARY_REBOOT,
+};
+
+enum esoc_req {
+	ESOC_REQ_IMG = 1,
+	ESOC_REQ_DEBUG,
+	ESOC_REQ_SHUTDOWN,
+};
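+
+/*
+ * Illustrative user-space flow for these ioctls (the device node name
+ * is an assumption; error handling omitted for brevity):
+ *
+ *	int fd = open("/dev/esoc-0", O_RDWR);
+ *	__u32 req, notify = ESOC_IMG_XFER_DONE;
+ *
+ *	ioctl(fd, ESOC_REG_REQ_ENG);
+ *	ioctl(fd, ESOC_WAIT_FOR_REQ, &req);
+ *	if (req == ESOC_REQ_IMG)
+ *		ioctl(fd, ESOC_NOTIFY, &notify);
+ */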
+
+#ifdef __KERNEL__
+/**
+ * struct esoc_handle: Handle for clients of esoc
+ * @name: name of the external soc.
+ * @link: link of external soc.
+ * @id: id of external soc.
+ */
+struct esoc_handle {
+	const char *name;
+	const char *link;
+	unsigned int id;
+};
+#endif
+#endif
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 300ef25..4ee67cb 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -972,12 +972,19 @@ struct kvm_irqfd {
 	__u8  pad[16];
 };
 
+/* For KVM_CAP_ADJUST_CLOCK */
+
+/* Do not use 1, KVM_CHECK_EXTENSION returned it before we had flags.  */
+#define KVM_CLOCK_TSC_STABLE		2
+
 struct kvm_clock_data {
 	__u64 clock;
 	__u32 flags;
 	__u32 pad[9];
 };
 
+/* For KVM_CAP_SW_TLB */
+
 #define KVM_MMU_FSL_BOOKE_NOHV		0
 #define KVM_MMU_FSL_BOOKE_HV		1
 
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 570eeca..ad1bc67 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -687,7 +687,8 @@ static void delete_all_elements(struct bpf_htab *htab)
 
 		hlist_for_each_entry_safe(l, n, head, hash_node) {
 			hlist_del_rcu(&l->hash_node);
-			htab_elem_free(htab, l);
+			if (l->state != HTAB_EXTRA_ELEM_USED)
+				htab_elem_free(htab, l);
 		}
 	}
 }
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 228f962..237f3d6 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -194,7 +194,7 @@ static int map_create(union bpf_attr *attr)
 
 	err = bpf_map_charge_memlock(map);
 	if (err)
-		goto free_map;
+		goto free_map_nouncharge;
 
 	err = bpf_map_new_fd(map);
 	if (err < 0)
@@ -204,6 +204,8 @@ static int map_create(union bpf_attr *attr)
 	return err;
 
 free_map:
+	bpf_map_uncharge_memlock(map);
+free_map_nouncharge:
 	map->ops->map_free(map);
 	return err;
 }
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 16ea095..4f64490 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1344,12 +1344,12 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
 	} else if (new->flags & IRQF_TRIGGER_MASK) {
 		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
-		unsigned int omsk = irq_settings_get_trigger_mask(desc);
+		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
 
 		if (nmsk != omsk)
 			/* hope the handler works with current  trigger mode */
 			pr_warn("irq %d uses trigger mode %u; requested %u\n",
-				irq, nmsk, omsk);
+				irq, omsk, nmsk);
 	}
 
 	*old_ptr = new;
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 7264ce2..b38f3fb 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -787,8 +787,6 @@ static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
 	return ret;
 }
 
-static void cont_flush(void);
-
 static ssize_t devkmsg_read(struct file *file, char __user *buf,
 			    size_t count, loff_t *ppos)
 {
@@ -804,7 +802,6 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
 	if (ret)
 		return ret;
 	raw_spin_lock_irq(&logbuf_lock);
-	cont_flush();
 	while (user->seq == log_next_seq) {
 		if (file->f_flags & O_NONBLOCK) {
 			ret = -EAGAIN;
@@ -867,7 +864,6 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
 		return -ESPIPE;
 
 	raw_spin_lock_irq(&logbuf_lock);
-	cont_flush();
 	switch (whence) {
 	case SEEK_SET:
 		/* the first record */
@@ -906,7 +902,6 @@ static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
 	poll_wait(file, &log_wait, wait);
 
 	raw_spin_lock_irq(&logbuf_lock);
-	cont_flush();
 	if (user->seq < log_next_seq) {
 		/* return error when data has vanished underneath us */
 		if (user->seq < log_first_seq)
@@ -1293,7 +1288,6 @@ static int syslog_print(char __user *buf, int size)
 		size_t skip;
 
 		raw_spin_lock_irq(&logbuf_lock);
-		cont_flush();
 		if (syslog_seq < log_first_seq) {
 			/* messages are gone, move to first one */
 			syslog_seq = log_first_seq;
@@ -1353,7 +1347,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 		return -ENOMEM;
 
 	raw_spin_lock_irq(&logbuf_lock);
-	cont_flush();
 	if (buf) {
 		u64 next_seq;
 		u64 seq;
@@ -1515,7 +1508,6 @@ int do_syslog(int type, char __user *buf, int len, int source)
 	/* Number of chars in the log buffer */
 	case SYSLOG_ACTION_SIZE_UNREAD:
 		raw_spin_lock_irq(&logbuf_lock);
-		cont_flush();
 		if (syslog_seq < log_first_seq) {
 			/* messages are gone, move to first one */
 			syslog_seq = log_first_seq;
@@ -3036,7 +3028,6 @@ void kmsg_dump(enum kmsg_dump_reason reason)
 		dumper->active = true;
 
 		raw_spin_lock_irqsave(&logbuf_lock, flags);
-		cont_flush();
 		dumper->cur_seq = clear_seq;
 		dumper->cur_idx = clear_idx;
 		dumper->next_seq = log_next_seq;
@@ -3127,7 +3118,6 @@ bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
 	bool ret;
 
 	raw_spin_lock_irqsave(&logbuf_lock, flags);
-	cont_flush();
 	ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len);
 	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
 
@@ -3170,7 +3160,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
 		goto out;
 
 	raw_spin_lock_irqsave(&logbuf_lock, flags);
-	cont_flush();
 	if (dumper->cur_seq < log_first_seq) {
 		/* messages are gone, move to first available one */
 		dumper->cur_seq = log_first_seq;
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index b3f05ee..cbb387a 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -54,7 +54,11 @@ static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1
 	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
 	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },};
 
-static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = {
+/*
+ * We have to use TASKSTATS_CMD_ATTR_MAX here; it is the maxattr in the family.
+ * Make sure they are always aligned.
+ */
+static const struct nla_policy cgroupstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
 	[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
 };
 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 2050a765..da87b3c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1862,6 +1862,10 @@ static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
 
 	/* Update rec->flags */
 	do_for_each_ftrace_rec(pg, rec) {
+
+		if (rec->flags & FTRACE_FL_DISABLED)
+			continue;
+
 		/* We need to update only differences of filter_hash */
 		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
 		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
@@ -1884,6 +1888,10 @@ static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
 
 	/* Roll back what we did above */
 	do_for_each_ftrace_rec(pg, rec) {
+
+		if (rec->flags & FTRACE_FL_DISABLED)
+			continue;
+
 		if (rec == end)
 			goto err_out;
 
@@ -2397,6 +2405,10 @@ void __weak ftrace_replace_code(int enable)
 		return;
 
 	do_for_each_ftrace_rec(pg, rec) {
+
+		if (rec->flags & FTRACE_FL_DISABLED)
+			continue;
+
 		failed = __ftrace_replace_code(rec, enable);
 		if (failed) {
 			ftrace_bug(failed, rec);
@@ -2763,7 +2775,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 		struct dyn_ftrace *rec;
 
 		do_for_each_ftrace_rec(pg, rec) {
-			if (FTRACE_WARN_ON_ONCE(rec->flags))
+			if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
 				pr_warn("  %pS flags:%lx\n",
 					(void *)rec->ip, rec->flags);
 		} while_for_each_ftrace_rec();
@@ -3598,6 +3610,10 @@ match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
 		goto out_unlock;
 
 	do_for_each_ftrace_rec(pg, rec) {
+
+		if (rec->flags & FTRACE_FL_DISABLED)
+			continue;
+
 		if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
 			ret = enter_record(hash, rec, clear_filter);
 			if (ret < 0) {
@@ -3793,6 +3809,9 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 
 	do_for_each_ftrace_rec(pg, rec) {
 
+		if (rec->flags & FTRACE_FL_DISABLED)
+			continue;
+
 		if (!ftrace_match_record(rec, &func_g, NULL, 0))
 			continue;
 
@@ -4685,6 +4704,9 @@ ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
 
 	do_for_each_ftrace_rec(pg, rec) {
 
+		if (rec->flags & FTRACE_FL_DISABLED)
+			continue;
+
 		if (ftrace_match_record(rec, &func_g, NULL, 0)) {
 			/* if it is in the array */
 			exists = false;
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f0c7f14..f2bd21b 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -683,10 +683,11 @@ static void pipe_advance(struct iov_iter *i, size_t size)
 	struct pipe_inode_info *pipe = i->pipe;
 	struct pipe_buffer *buf;
 	int idx = i->idx;
-	size_t off = i->iov_offset;
+	size_t off = i->iov_offset, orig_sz;
 	
 	if (unlikely(i->count < size))
 		size = i->count;
+	orig_sz = size;
 
 	if (size) {
 		if (off) /* make it relative to the beginning of buffer */
@@ -713,6 +714,7 @@ static void pipe_advance(struct iov_iter *i, size_t size)
 			pipe->nrbufs--;
 		}
 	}
+	i->count -= orig_sz;
 }
 
 void iov_iter_advance(struct iov_iter *i, size_t size)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index cdcd25c..eff3de3 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1426,11 +1426,12 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 
 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 		  unsigned long new_addr, unsigned long old_end,
-		  pmd_t *old_pmd, pmd_t *new_pmd)
+		  pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush)
 {
 	spinlock_t *old_ptl, *new_ptl;
 	pmd_t pmd;
 	struct mm_struct *mm = vma->vm_mm;
+	bool force_flush = false;
 
 	if ((old_addr & ~HPAGE_PMD_MASK) ||
 	    (new_addr & ~HPAGE_PMD_MASK) ||
@@ -1455,6 +1456,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 		new_ptl = pmd_lockptr(mm, new_pmd);
 		if (new_ptl != old_ptl)
 			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
+		if (pmd_present(*old_pmd) && pmd_dirty(*old_pmd))
+			force_flush = true;
 		pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
 		VM_BUG_ON(!pmd_none(*new_pmd));
 
@@ -1467,6 +1470,10 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 		set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
 		if (new_ptl != old_ptl)
 			spin_unlock(new_ptl);
+		if (force_flush)
+			flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
+		else
+			*need_flush = true;
 		spin_unlock(old_ptl);
 		return true;
 	}
diff --git a/mm/mremap.c b/mm/mremap.c
index da22ad2..6ccecc0 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -104,11 +104,13 @@ static pte_t move_soft_dirty_pte(pte_t pte)
 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		unsigned long old_addr, unsigned long old_end,
 		struct vm_area_struct *new_vma, pmd_t *new_pmd,
-		unsigned long new_addr, bool need_rmap_locks)
+		unsigned long new_addr, bool need_rmap_locks, bool *need_flush)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *old_pte, *new_pte, pte;
 	spinlock_t *old_ptl, *new_ptl;
+	bool force_flush = false;
+	unsigned long len = old_end - old_addr;
 
 	/*
 	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
@@ -146,6 +148,14 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 				   new_pte++, new_addr += PAGE_SIZE) {
 		if (pte_none(*old_pte))
 			continue;
+
+		/*
+		 * We are remapping a dirty PTE, make sure to
+		 * flush TLB before we drop the PTL for the
+		 * old PTE or we may race with page_mkclean().
+		 */
+		if (pte_present(*old_pte) && pte_dirty(*old_pte))
+			force_flush = true;
 		pte = ptep_get_and_clear(mm, old_addr, old_pte);
 		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
 		pte = move_soft_dirty_pte(pte);
@@ -156,6 +166,10 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	if (new_ptl != old_ptl)
 		spin_unlock(new_ptl);
 	pte_unmap(new_pte - 1);
+	if (force_flush)
+		flush_tlb_range(vma, old_end - len, old_end);
+	else
+		*need_flush = true;
 	pte_unmap_unlock(old_pte - 1, old_ptl);
 	if (need_rmap_locks)
 		drop_rmap_locks(vma);
@@ -201,13 +215,12 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 				if (need_rmap_locks)
 					take_rmap_locks(vma);
 				moved = move_huge_pmd(vma, old_addr, new_addr,
-						    old_end, old_pmd, new_pmd);
+						    old_end, old_pmd, new_pmd,
+						    &need_flush);
 				if (need_rmap_locks)
 					drop_rmap_locks(vma);
-				if (moved) {
-					need_flush = true;
+				if (moved)
 					continue;
-				}
 			}
 			split_huge_pmd(vma, old_pmd, old_addr);
 			if (pmd_trans_unstable(old_pmd))
@@ -220,11 +233,10 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 			extent = next - new_addr;
 		if (extent > LATENCY_LIMIT)
 			extent = LATENCY_LIMIT;
-		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
-			  new_vma, new_pmd, new_addr, need_rmap_locks);
-		need_flush = true;
+		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
+			  new_pmd, new_addr, need_rmap_locks, &need_flush);
 	}
-	if (likely(need_flush))
+	if (need_flush)
 		flush_tlb_range(vma, old_end-len, old_addr);
 
 	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 8e999ff..8af9d25 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1549,24 +1549,31 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
 	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
 	struct sock *sk = sock->sk;
 	struct bcm_sock *bo = bcm_sk(sk);
+	int ret = 0;
 
 	if (len < sizeof(*addr))
 		return -EINVAL;
 
-	if (bo->bound)
-		return -EISCONN;
+	lock_sock(sk);
+
+	if (bo->bound) {
+		ret = -EISCONN;
+		goto fail;
+	}
 
 	/* bind a device to this socket */
 	if (addr->can_ifindex) {
 		struct net_device *dev;
 
 		dev = dev_get_by_index(&init_net, addr->can_ifindex);
-		if (!dev)
-			return -ENODEV;
-
+		if (!dev) {
+			ret = -ENODEV;
+			goto fail;
+		}
 		if (dev->type != ARPHRD_CAN) {
 			dev_put(dev);
-			return -ENODEV;
+			ret = -ENODEV;
+			goto fail;
 		}
 
 		bo->ifindex = dev->ifindex;
@@ -1577,17 +1584,24 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
 		bo->ifindex = 0;
 	}
 
-	bo->bound = 1;
-
 	if (proc_dir) {
 		/* unique socket address as filename */
 		sprintf(bo->procname, "%lu", sock_i_ino(sk));
 		bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
 						     proc_dir,
 						     &bcm_proc_fops, sk);
+		if (!bo->bcm_proc_read) {
+			ret = -ENOMEM;
+			goto fail;
+		}
 	}
 
-	return 0;
+	bo->bound = 1;
+
+fail:
+	release_sock(sk);
+
+	return ret;
 }
 
 static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
diff --git a/net/core/dev.c b/net/core/dev.c
index 346f92f..46a4830 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1766,19 +1766,14 @@ EXPORT_SYMBOL_GPL(is_skb_forwardable);
 
 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 {
-	if (skb_orphan_frags(skb, GFP_ATOMIC) ||
-	    unlikely(!is_skb_forwardable(dev, skb))) {
-		atomic_long_inc(&dev->rx_dropped);
-		kfree_skb(skb);
-		return NET_RX_DROP;
+	int ret = ____dev_forward_skb(dev, skb);
+
+	if (likely(!ret)) {
+		skb->protocol = eth_type_trans(skb, dev);
+		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
 	}
 
-	skb_scrub_packet(skb, true);
-	skb->priority = 0;
-	skb->protocol = eth_type_trans(skb, dev);
-	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
-
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(__dev_forward_skb);
 
@@ -2484,7 +2479,7 @@ int skb_checksum_help(struct sk_buff *skb)
 			goto out;
 	}
 
-	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
+	*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
 out_set_summed:
 	skb->ip_summed = CHECKSUM_NONE;
 out:
diff --git a/net/core/filter.c b/net/core/filter.c
index 00351cd..b391209 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1628,6 +1628,19 @@ static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
 	return dev_forward_skb(dev, skb);
 }
 
+static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
+				      struct sk_buff *skb)
+{
+	int ret = ____dev_forward_skb(dev, skb);
+
+	if (likely(!ret)) {
+		skb->dev = dev;
+		ret = netif_rx(skb);
+	}
+
+	return ret;
+}
+
 static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
 {
 	int ret;
@@ -1647,6 +1660,51 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
 	return ret;
 }
 
+static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
+				 u32 flags)
+{
+	/* skb->mac_len is not set on normal egress */
+	unsigned int mlen = skb->network_header - skb->mac_header;
+
+	__skb_pull(skb, mlen);
+
+	/* At ingress, the mac header has already been pulled once.
+	 * At egress, skb_postpull_rcsum() has to be done in case the
+	 * skb originated from ingress (i.e. a forwarded skb)
+	 * to ensure that rcsum starts at net header.
+	 */
+	if (!skb_at_tc_ingress(skb))
+		skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
+	skb_pop_mac_header(skb);
+	skb_reset_mac_len(skb);
+	return flags & BPF_F_INGRESS ?
+	       __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
+}
+
+static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
+				 u32 flags)
+{
+	bpf_push_mac_rcsum(skb);
+	return flags & BPF_F_INGRESS ?
+	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
+}
+
+static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
+			  u32 flags)
+{
+	switch (dev->type) {
+	case ARPHRD_TUNNEL:
+	case ARPHRD_TUNNEL6:
+	case ARPHRD_SIT:
+	case ARPHRD_IPGRE:
+	case ARPHRD_VOID:
+	case ARPHRD_NONE:
+		return __bpf_redirect_no_mac(skb, dev, flags);
+	default:
+		return __bpf_redirect_common(skb, dev, flags);
+	}
+}
+
 BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
 {
 	struct net_device *dev;
@@ -1675,10 +1733,7 @@ BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
 		return -ENOMEM;
 	}
 
-	bpf_push_mac_rcsum(clone);
-
-	return flags & BPF_F_INGRESS ?
-	       __bpf_rx_skb(dev, clone) : __bpf_tx_skb(dev, clone);
+	return __bpf_redirect(clone, dev, flags);
 }
 
 static const struct bpf_func_proto bpf_clone_redirect_proto = {
@@ -1722,10 +1777,7 @@ int skb_do_redirect(struct sk_buff *skb)
 		return -EINVAL;
 	}
 
-	bpf_push_mac_rcsum(skb);
-
-	return ri->flags & BPF_F_INGRESS ?
-	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
+	return __bpf_redirect(skb, dev, ri->flags);
 }
 
 static const struct bpf_func_proto bpf_redirect_proto = {
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index ab193e5..69e4463 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -122,7 +122,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
 	struct flow_dissector_key_keyid *key_keyid;
 	bool skip_vlan = false;
 	u8 ip_proto = 0;
-	bool ret = false;
+	bool ret;
 
 	if (!data) {
 		data = skb->data;
@@ -549,12 +549,17 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
 out_good:
 	ret = true;
 
-out_bad:
+	key_control->thoff = (u16)nhoff;
+out:
 	key_basic->n_proto = proto;
 	key_basic->ip_proto = ip_proto;
-	key_control->thoff = (u16)nhoff;
 
 	return ret;
+
+out_bad:
+	ret = false;
+	key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
+	goto out;
 }
 EXPORT_SYMBOL(__skb_flow_dissect);
 
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index fb7348f..db313ec 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -275,6 +275,7 @@ int rtnl_unregister(int protocol, int msgtype)
 
 	rtnl_msg_handlers[protocol][msgindex].doit = NULL;
 	rtnl_msg_handlers[protocol][msgindex].dumpit = NULL;
+	rtnl_msg_handlers[protocol][msgindex].calcit = NULL;
 
 	return 0;
 }
diff --git a/net/core/sock.c b/net/core/sock.c
index c73e28f..5e3ca41 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -453,7 +453,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 EXPORT_SYMBOL(sock_queue_rcv_skb);
 
 int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
-		     const int nested, unsigned int trim_cap)
+		     const int nested, unsigned int trim_cap, bool refcounted)
 {
 	int rc = NET_RX_SUCCESS;
 
@@ -487,7 +487,8 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
 
 	bh_unlock_sock(sk);
 out:
-	sock_put(sk);
+	if (refcounted)
+		sock_put(sk);
 	return rc;
 discard_and_relse:
 	kfree_skb(skb);
@@ -1543,6 +1544,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
 
 		newsk->sk_err	   = 0;
+		newsk->sk_err_soft = 0;
 		newsk->sk_priority = 0;
 		newsk->sk_incoming_cpu = raw_smp_processor_id();
 		atomic64_set(&newsk->sk_cookie, 0);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 345a3ae..b567c87 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -235,7 +235,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 {
 	const struct iphdr *iph = (struct iphdr *)skb->data;
 	const u8 offset = iph->ihl << 2;
-	const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
+	const struct dccp_hdr *dh;
 	struct dccp_sock *dp;
 	struct inet_sock *inet;
 	const int type = icmp_hdr(skb)->type;
@@ -245,11 +245,13 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 	int err;
 	struct net *net = dev_net(skb->dev);
 
-	if (skb->len < offset + sizeof(*dh) ||
-	    skb->len < offset + __dccp_basic_hdr_len(dh)) {
-		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
-		return;
-	}
+	/* Only need dccph_dport & dccph_sport which are the first
+	 * 4 bytes in dccp header.
+	 * Our caller (icmp_socket_deliver()) already pulled 8 bytes for us.
+	 */
+	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8);
+	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
+	dh = (struct dccp_hdr *)(skb->data + offset);
 
 	sk = __inet_lookup_established(net, &dccp_hashinfo,
 				       iph->daddr, dh->dccph_dport,
@@ -868,7 +870,7 @@ static int dccp_v4_rcv(struct sk_buff *skb)
 		goto discard_and_relse;
 	nf_reset(skb);
 
-	return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4);
+	return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4, refcounted);
 
 no_dccp_socket:
 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 3828f94..715e5d1 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -70,7 +70,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 			u8 type, u8 code, int offset, __be32 info)
 {
 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
-	const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
+	const struct dccp_hdr *dh;
 	struct dccp_sock *dp;
 	struct ipv6_pinfo *np;
 	struct sock *sk;
@@ -78,12 +78,13 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	__u64 seq;
 	struct net *net = dev_net(skb->dev);
 
-	if (skb->len < offset + sizeof(*dh) ||
-	    skb->len < offset + __dccp_basic_hdr_len(dh)) {
-		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
-				  ICMP6_MIB_INERRORS);
-		return;
-	}
+	/* Only need dccph_dport & dccph_sport which are the first
+	 * 4 bytes in dccp header.
+	 * Our caller (icmpv6_notify()) already pulled 8 bytes for us.
+	 */
+	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8);
+	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
+	dh = (struct dccp_hdr *)(skb->data + offset);
 
 	sk = __inet6_lookup_established(net, &dccp_hashinfo,
 					&hdr->daddr, dh->dccph_dport,
@@ -738,7 +739,8 @@ static int dccp_v6_rcv(struct sk_buff *skb)
 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto discard_and_relse;
 
-	return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4) ? -1 : 0;
+	return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4,
+				refcounted) ? -1 : 0;
 
 no_dccp_socket:
 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
@@ -956,6 +958,7 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
 	.getsockopt	   = ipv6_getsockopt,
 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
+	.bind_conflict	   = inet6_csk_bind_conflict,
 #ifdef CONFIG_COMPAT
 	.compat_setsockopt = compat_ipv6_setsockopt,
 	.compat_getsockopt = compat_ipv6_getsockopt,
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 41e6580..9fe25bf 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1009,6 +1009,10 @@ void dccp_close(struct sock *sk, long timeout)
 		__kfree_skb(skb);
 	}
 
+	/* If the socket has already been reset, kill it. */
+	if (sk->sk_state == DCCP_CLOSED)
+		goto adjudge_to_death;
+
 	if (data_was_unread) {
 		/* Unread data was tossed, send an appropriate Reset Code */
 		DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 3bfd7d7..6d902a0 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -548,9 +548,9 @@ EXPORT_SYMBOL(inet_dgram_connect);
 
 static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
 {
-	DEFINE_WAIT(wait);
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
-	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+	add_wait_queue(sk_sleep(sk), &wait);
 	sk->sk_write_pending += writebias;
 
 	/* Basic assumption: if someone sets sk->sk_err, he _must_
@@ -560,13 +560,12 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
 	 */
 	while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
 		release_sock(sk);
-		timeo = schedule_timeout(timeo);
+		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
 		lock_sock(sk);
 		if (signal_pending(current) || !timeo)
 			break;
-		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	}
-	finish_wait(sk_sleep(sk), &wait);
+	remove_wait_queue(sk_sleep(sk), &wait);
 	sk->sk_write_pending -= writebias;
 	return timeo;
 }
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 31cef36..4cff74d 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2413,22 +2413,19 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
 	struct key_vector *l, **tp = &iter->tnode;
 	t_key key;
 
-	/* use cache location of next-to-find key */
+	/* use cached location of previously found key */
 	if (iter->pos > 0 && pos >= iter->pos) {
-		pos -= iter->pos;
 		key = iter->key;
 	} else {
-		iter->pos = 0;
+		iter->pos = 1;
 		key = 0;
 	}
 
-	while ((l = leaf_walk_rcu(tp, key)) != NULL) {
+	pos -= iter->pos;
+
+	while ((l = leaf_walk_rcu(tp, key)) && (pos-- > 0)) {
 		key = l->key + 1;
 		iter->pos++;
-
-		if (--pos <= 0)
-			break;
-
 		l = NULL;
 
 		/* handle unlikely case of a key wrap */
@@ -2437,7 +2434,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
 	}
 
 	if (l)
-		iter->key = key;	/* remember it */
+		iter->key = l->key;	/* remember it */
 	else
 		iter->pos = 0;		/* forget it */
 
@@ -2465,7 +2462,7 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
 		return fib_route_get_idx(iter, *pos);
 
 	iter->pos = 0;
-	iter->key = 0;
+	iter->key = KEY_MAX;
 
 	return SEQ_START_TOKEN;
 }
@@ -2474,7 +2471,7 @@ static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	struct fib_route_iter *iter = seq->private;
 	struct key_vector *l = NULL;
-	t_key key = iter->key;
+	t_key key = iter->key + 1;
 
 	++*pos;
 
@@ -2483,7 +2480,7 @@ static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 		l = leaf_walk_rcu(&iter->tnode, key);
 
 	if (l) {
-		iter->key = l->key + 1;
+		iter->key = l->key;
 		iter->pos++;
 	} else {
 		iter->pos = 0;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 38abe70..48734ee 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -477,7 +477,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
 	fl4->flowi4_proto = IPPROTO_ICMP;
 	fl4->fl4_icmp_type = type;
 	fl4->fl4_icmp_code = code;
-	fl4->flowi4_oif = l3mdev_master_ifindex(skb_in->dev);
+	fl4->flowi4_oif = l3mdev_master_ifindex(skb_dst(skb_in)->dev);
 
 	security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
 	rt = __ip_route_output_key_hash(net, fl4,
@@ -502,7 +502,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
 	if (err)
 		goto relookup_failed;
 
-	if (inet_addr_type_dev_table(net, skb_in->dev,
+	if (inet_addr_type_dev_table(net, skb_dst(skb_in)->dev,
 				     fl4_dec.saddr) == RTN_LOCAL) {
 		rt2 = __ip_route_output_key(net, &fl4_dec);
 		if (IS_ERR(rt2))
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 8b4ffd2..9f0a7b9 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -117,7 +117,7 @@ int ip_forward(struct sk_buff *skb)
 	if (opt->is_strictroute && rt->rt_uses_gateway)
 		goto sr_failed;
 
-	IPCB(skb)->flags |= IPSKB_FORWARDED | IPSKB_FRAG_SEGS;
+	IPCB(skb)->flags |= IPSKB_FORWARDED;
 	mtu = ip_dst_mtu_maybe_forward(&rt->dst, true);
 	if (ip_exceeds_mtu(skb, mtu)) {
 		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 37dfacd..eaf720b 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -239,19 +239,23 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk,
 	struct sk_buff *segs;
 	int ret = 0;
 
-	/* common case: fragmentation of segments is not allowed,
-	 * or seglen is <= mtu
+	/* common case: seglen is <= mtu
 	 */
-	if (((IPCB(skb)->flags & IPSKB_FRAG_SEGS) == 0) ||
-	      skb_gso_validate_mtu(skb, mtu))
+	if (skb_gso_validate_mtu(skb, mtu))
 		return ip_finish_output2(net, sk, skb);
 
-	/* Slowpath -  GSO segment length is exceeding the dst MTU.
+	/* Slowpath -  GSO segment length exceeds the egress MTU.
 	 *
-	 * This can happen in two cases:
-	 * 1) TCP GRO packet, DF bit not set
-	 * 2) skb arrived via virtio-net, we thus get TSO/GSO skbs directly
-	 * from host network stack.
+	 * This can happen in several cases:
+	 *  - Forwarding of a TCP GRO skb, when DF flag is not set.
+	 *  - Forwarding of an skb that arrived on a virtualization interface
+	 *    (virtio-net/vhost/tap) with TSO/GSO size set by other network
+	 *    stack.
+	 *  - Local GSO skb transmitted on a NETIF_F_TSO tunnel stacked over an
+	 *    interface with a smaller MTU.
+	 *  - Arriving GRO skb (or GSO skb in a virtualized environment) that is
+	 *    bridged to a NETIF_F_TSO tunnel stacked over an interface with an
+	 *    insufficient MTU.
 	 */
 	features = netif_skb_features(skb);
 	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
@@ -1579,7 +1583,8 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
 	}
 
 	oif = arg->bound_dev_if;
-	oif = oif ? : skb->skb_iif;
+	if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
+		oif = skb->skb_iif;
 
 	flowi4_init_output(&fl4, oif,
 			   IP4_REPLY_MARK(net, skb->mark),
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 777bc18..fed3d29 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -63,7 +63,6 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
 	int pkt_len = skb->len - skb_inner_network_offset(skb);
 	struct net *net = dev_net(rt->dst.dev);
 	struct net_device *dev = skb->dev;
-	int skb_iif = skb->skb_iif;
 	struct iphdr *iph;
 	int err;
 
@@ -73,16 +72,6 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
 	skb_dst_set(skb, &rt->dst);
 	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
 
-	if (skb_iif && !(df & htons(IP_DF))) {
-		/* Arrived from an ingress interface, got encapsulated, with
-		 * fragmentation of encapulating frames allowed.
-		 * If skb is gso, the resulting encapsulated network segments
-		 * may exceed dst mtu.
-		 * Allow IP Fragmentation of segments.
-		 */
-		IPCB(skb)->flags |= IPSKB_FRAG_SEGS;
-	}
-
 	/* Push down and install the IP header. */
 	skb_push(skb, sizeof(struct iphdr));
 	skb_reset_network_header(skb);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 5f006e1..27089f5 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1749,7 +1749,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
 		vif->dev->stats.tx_bytes += skb->len;
 	}
 
-	IPCB(skb)->flags |= IPSKB_FORWARDED | IPSKB_FRAG_SEGS;
+	IPCB(skb)->flags |= IPSKB_FORWARDED;
 
 	/* RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
 	 * not only before forwarding, but after forwarding on all output
diff --git a/net/ipv4/netfilter/nft_dup_ipv4.c b/net/ipv4/netfilter/nft_dup_ipv4.c
index bf855e6..0c01a270 100644
--- a/net/ipv4/netfilter/nft_dup_ipv4.c
+++ b/net/ipv4/netfilter/nft_dup_ipv4.c
@@ -28,7 +28,7 @@ static void nft_dup_ipv4_eval(const struct nft_expr *expr,
 	struct in_addr gw = {
 		.s_addr = (__force __be32)regs->data[priv->sreg_addr],
 	};
-	int oif = regs->data[priv->sreg_dev];
+	int oif = priv->sreg_dev ? regs->data[priv->sreg_dev] : -1;
 
 	nf_dup_ipv4(pkt->net, pkt->skb, pkt->hook, &gw, oif);
 }
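
The eval change above relies on the init path leaving sreg_dev at 0 when userspace supplies no device register; the matching init (outside this hunk) presumably looks roughly like this:

	static int nft_dup_ipv4_init(const struct nft_ctx *ctx,
				     const struct nft_expr *expr,
				     const struct nlattr * const tb[])
	{
		struct nft_dup_ipv4 *priv = nft_expr_priv(expr);
		int err;

		priv->sreg_addr = nft_parse_register(tb[NFTA_DUP_SREG_ADDR]);
		err = nft_validate_register_load(priv->sreg_addr,
						 sizeof(struct in_addr));
		if (err < 0)
			return err;

		/* only touch sreg_dev when the attribute is present; a zero
		 * sreg_dev then means "no oif", and eval passes -1 so that
		 * nf_dup_ipv4() skips the device-based reroute
		 */
		if (tb[NFTA_DUP_SREG_DEV] != NULL) {
			priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]);
			return nft_validate_register_load(priv->sreg_dev,
							  sizeof(int));
		}
		return 0;
	}
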
@@ -59,7 +59,9 @@ static int nft_dup_ipv4_dump(struct sk_buff *skb, const struct nft_expr *expr)
 {
 	struct nft_dup_ipv4 *priv = nft_expr_priv(expr);
 
-	if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) ||
+	if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr))
+		goto nla_put_failure;
+	if (priv->sreg_dev &&
 	    nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
 		goto nla_put_failure;
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index bb92a82..8cb3f52 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -755,7 +755,9 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
 			goto reject_redirect;
 	}
 
-	n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw);
+	n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
+	if (!n)
+		n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
 	if (!IS_ERR(n)) {
 		if (!(n->nud_state & NUD_VALID)) {
 			neigh_event_send(n, NULL);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3251fe7..814af89 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1164,7 +1164,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 
 	err = -EPIPE;
 	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
-		goto out_err;
+		goto do_error;
 
 	sg = !!(sk->sk_route_caps & NETIF_F_SG);
 
@@ -1241,7 +1241,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 
 			if (!skb_can_coalesce(skb, i, pfrag->page,
 					      pfrag->offset)) {
-				if (i == sysctl_max_skb_frags || !sg) {
+				if (i >= sysctl_max_skb_frags || !sg) {
 					tcp_mark_push(tp, skb);
 					goto new_segment;
 				}
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index 10d728b..ab37c67 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -56,6 +56,7 @@ struct dctcp {
 	u32 next_seq;
 	u32 ce_state;
 	u32 delayed_ack_reserved;
+	u32 loss_cwnd;
 };
 
 static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */
@@ -96,6 +97,7 @@ static void dctcp_init(struct sock *sk)
 		ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
 
 		ca->delayed_ack_reserved = 0;
+		ca->loss_cwnd = 0;
 		ca->ce_state = 0;
 
 		dctcp_reset(tp, ca);
@@ -111,9 +113,10 @@ static void dctcp_init(struct sock *sk)
 
 static u32 dctcp_ssthresh(struct sock *sk)
 {
-	const struct dctcp *ca = inet_csk_ca(sk);
+	struct dctcp *ca = inet_csk_ca(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 
+	ca->loss_cwnd = tp->snd_cwnd;
 	return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
 }
 
@@ -308,12 +311,20 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
 	return 0;
 }
 
+static u32 dctcp_cwnd_undo(struct sock *sk)
+{
+	const struct dctcp *ca = inet_csk_ca(sk);
+
+	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
+}
+
 static struct tcp_congestion_ops dctcp __read_mostly = {
 	.init		= dctcp_init,
 	.in_ack_event   = dctcp_update_alpha,
 	.cwnd_event	= dctcp_cwnd_event,
 	.ssthresh	= dctcp_ssthresh,
 	.cong_avoid	= tcp_reno_cong_avoid,
+	.undo_cwnd	= dctcp_cwnd_undo,
 	.set_state	= dctcp_state,
 	.get_info	= dctcp_get_info,
 	.flags		= TCP_CONG_NEEDS_ECN,
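
Worked example with illustrative numbers (not from the patch): take dctcp_alpha = 512, i.e. roughly a quarter of packets CE-marked, and snd_cwnd = 100 segments when loss is detected:

	/*   ssthresh  = 100 - ((100 * 512) >> 11) = 100 - 25 = 75
	 *   loss_cwnd = 100   (recorded by dctcp_ssthresh above)
	 *
	 * if the loss later proves spurious, dctcp_cwnd_undo() restores
	 * max(tp->snd_cwnd, ca->loss_cwnd) = 100 instead of leaving the
	 * connection stuck at 75
	 */
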
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 61b7be3..2259114 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1564,6 +1564,21 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(tcp_add_backlog);
 
+int tcp_filter(struct sock *sk, struct sk_buff *skb)
+{
+	struct tcphdr *th = (struct tcphdr *)skb->data;
+	unsigned int eaten = skb->len;
+	int err;
+
+	err = sk_filter_trim_cap(sk, skb, th->doff * 4);
+	if (!err) {
+		eaten -= skb->len;
+		TCP_SKB_CB(skb)->end_seq -= eaten;
+	}
+	return err;
+}
+EXPORT_SYMBOL(tcp_filter);
+
 /*
  *	From tcp_input.c
  */
@@ -1676,8 +1691,10 @@ int tcp_v4_rcv(struct sk_buff *skb)
 
 	nf_reset(skb);
 
-	if (sk_filter(sk, skb))
+	if (tcp_filter(sk, skb))
 		goto discard_and_relse;
+	th = (const struct tcphdr *)skb->data;
+	iph = ip_hdr(skb);
 
 	skb->dev = NULL;
 
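The header pointers are reloaded because sk_filter_trim_cap() may trim the skb and pull headers, which can reallocate skb->head. Illustrative numbers for the end_seq adjustment in tcp_filter(), assuming a bare 20-byte TCP header:

	/* skb->len = 1000, th->doff * 4 = 20; a filter verdict that trims
	 * the payload leaves skb->len = 20, so:
	 *
	 *   eaten = 1000 - 20 = 980;
	 *   TCP_SKB_CB(skb)->end_seq -= 980;
	 *
	 * keeping sequence-space accounting in step with the bytes that
	 * actually remain in the skb
	 */
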
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 1d800ee..77cde2b 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -448,7 +448,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
 	if (__ipv6_addr_needs_scope_id(addr_type))
 		iif = skb->dev->ifindex;
 	else
-		iif = l3mdev_master_ifindex(skb->dev);
+		iif = l3mdev_master_ifindex(skb_dst(skb)->dev);
 
 	/*
 	 *	Must not send error if the source does not uniquely
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 6001e78..59eb4ed 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1366,7 +1366,7 @@ static int __ip6_append_data(struct sock *sk,
 	if (((length > mtu) ||
 	     (skb && skb_is_gso(skb))) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
-	    (rt->dst.dev->features & NETIF_F_UFO) &&
+	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
 	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
 		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
 					  hh_len, fragheaderlen, exthdrlen,
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index a752052..b283f29 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -88,9 +88,6 @@ int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
 
 	uh->len = htons(skb->len);
 
-	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
-	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
-			    | IPSKB_REROUTED);
 	skb_dst_set(skb, dst);
 
 	udp6_set_csum(nocheck, skb, saddr, daddr, skb->len);
diff --git a/net/ipv6/netfilter/nft_dup_ipv6.c b/net/ipv6/netfilter/nft_dup_ipv6.c
index 8bfd470..831f86e 100644
--- a/net/ipv6/netfilter/nft_dup_ipv6.c
+++ b/net/ipv6/netfilter/nft_dup_ipv6.c
@@ -26,7 +26,7 @@ static void nft_dup_ipv6_eval(const struct nft_expr *expr,
 {
 	struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
 	struct in6_addr *gw = (struct in6_addr *)&regs->data[priv->sreg_addr];
-	int oif = regs->data[priv->sreg_dev];
+	int oif = priv->sreg_dev ? regs->data[priv->sreg_dev] : -1;
 
 	nf_dup_ipv6(pkt->net, pkt->skb, pkt->hook, gw, oif);
 }
@@ -57,7 +57,9 @@ static int nft_dup_ipv6_dump(struct sk_buff *skb, const struct nft_expr *expr)
 {
 	struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
 
-	if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) ||
+	if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr))
+		goto nla_put_failure;
+	if (priv->sreg_dev &&
 	    nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
 		goto nla_put_failure;
 
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index e732479..77e6547 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1358,6 +1358,9 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
 	if (rt6->rt6i_flags & RTF_LOCAL)
 		return;
 
+	if (dst_metric_locked(dst, RTAX_MTU))
+		return;
+
 	dst_confirm(dst);
 	mtu = max_t(u32, mtu, IPV6_MIN_MTU);
 	if (mtu >= dst_mtu(dst))
@@ -2723,6 +2726,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
 	   PMTU discouvery.
 	 */
 	if (rt->dst.dev == arg->dev &&
+	    dst_metric_raw(&rt->dst, RTAX_MTU) &&
 	    !dst_metric_locked(&rt->dst, RTAX_MTU)) {
 		if (rt->rt6i_flags & RTF_CACHE) {
 			/* For RTF_CACHE with rt6i_pmtu == 0
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 6125330..6e24ed2 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -819,8 +819,12 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
 	fl6.flowi6_proto = IPPROTO_TCP;
 	if (rt6_need_strict(&fl6.daddr) && !oif)
 		fl6.flowi6_oif = tcp_v6_iif(skb);
-	else
-		fl6.flowi6_oif = oif ? : skb->skb_iif;
+	else {
+		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
+			oif = skb->skb_iif;
+
+		fl6.flowi6_oif = oif;
+	}
 
 	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
 	fl6.fl6_dport = t1->dest;
@@ -1226,7 +1230,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 	if (skb->protocol == htons(ETH_P_IP))
 		return tcp_v4_do_rcv(sk, skb);
 
-	if (sk_filter(sk, skb))
+	if (tcp_filter(sk, skb))
 		goto discard;
 
 	/*
@@ -1454,8 +1458,10 @@ static int tcp_v6_rcv(struct sk_buff *skb)
 	if (tcp_v6_inbound_md5_hash(sk, skb))
 		goto discard_and_relse;
 
-	if (sk_filter(sk, skb))
+	if (tcp_filter(sk, skb))
 		goto discard_and_relse;
+	th = (const struct tcphdr *)skb->data;
+	hdr = ipv6_hdr(skb);
 
 	skb->dev = NULL;
 
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index c3c809b..a6e44ef 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2845,7 +2845,7 @@ static struct genl_family ip_vs_genl_family = {
 	.hdrsize	= 0,
 	.name		= IPVS_GENL_NAME,
 	.version	= IPVS_GENL_VERSION,
-	.maxattr	= IPVS_CMD_MAX,
+	.maxattr	= IPVS_CMD_ATTR_MAX,
 	.netnsok        = true,         /* Make ipvsadm to work on netns */
 };
 
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 1b07578..9350530 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -283,6 +283,7 @@ struct ip_vs_sync_buff {
  */
 static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho)
 {
+	memset(ho, 0, sizeof(*ho));
 	ho->init_seq       = get_unaligned_be32(&no->init_seq);
 	ho->delta          = get_unaligned_be32(&no->delta);
 	ho->previous_delta = get_unaligned_be32(&no->previous_delta);
@@ -917,8 +918,10 @@ static void ip_vs_proc_conn(struct netns_ipvs *ipvs, struct ip_vs_conn_param *pa
 			kfree(param->pe_data);
 	}
 
-	if (opt)
-		memcpy(&cp->in_seq, opt, sizeof(*opt));
+	if (opt) {
+		cp->in_seq = opt->in_seq;
+		cp->out_seq = opt->out_seq;
+	}
 	atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
 	cp->state = state;
 	cp->old_state = cp->state;
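
Both hunks harden the struct handling: the memset() clears padding in the on-stack ip_vs_seq before its fields are filled, and the member-wise copy stops assuming cp->in_seq and cp->out_seq sit back-to-back the way the wire-format options block does:

	/* old: memcpy(&cp->in_seq, opt, sizeof(*opt));
	 *      writes sizeof(*opt) bytes starting at in_seq, spilling into
	 *      whatever the compiler happens to place after it
	 * new: cp->in_seq  = opt->in_seq;
	 *      cp->out_seq = opt->out_seq;
	 *      each field lands where it belongs, whatever the layout
	 */
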
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index df2f5a3..0f87e5d 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -76,6 +76,7 @@ struct conntrack_gc_work {
 	struct delayed_work	dwork;
 	u32			last_bucket;
 	bool			exiting;
+	long			next_gc_run;
 };
 
 static __read_mostly struct kmem_cache *nf_conntrack_cachep;
@@ -83,9 +84,11 @@ static __read_mostly spinlock_t nf_conntrack_locks_all_lock;
 static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
 static __read_mostly bool nf_conntrack_locks_all;
 
+/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV of the table */
 #define GC_MAX_BUCKETS_DIV	64u
-#define GC_MAX_BUCKETS		8192u
-#define GC_INTERVAL		(5 * HZ)
+/* upper bound of scan intervals */
+#define GC_INTERVAL_MAX		(2 * HZ)
+/* maximum conntracks to evict per gc run */
 #define GC_MAX_EVICTS		256u
 
 static struct conntrack_gc_work conntrack_gc_work;
@@ -936,13 +939,13 @@ static noinline int early_drop(struct net *net, unsigned int _hash)
 static void gc_worker(struct work_struct *work)
 {
 	unsigned int i, goal, buckets = 0, expired_count = 0;
-	unsigned long next_run = GC_INTERVAL;
-	unsigned int ratio, scanned = 0;
 	struct conntrack_gc_work *gc_work;
+	unsigned int ratio, scanned = 0;
+	unsigned long next_run;
 
 	gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
 
-	goal = min(nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV, GC_MAX_BUCKETS);
+	goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV;
 	i = gc_work->last_bucket;
 
 	do {
@@ -982,17 +985,47 @@ static void gc_worker(struct work_struct *work)
 	if (gc_work->exiting)
 		return;
 
+	/*
+	 * Eviction will normally happen from the packet path, and not
+	 * from this gc worker.
+	 *
+	 * This worker is only here to reap expired entries when the system
+	 * has gone idle after a busy period.
+	 *
+	 * The heuristics below are supposed to balance conflicting goals:
+	 *
+	 * 1. Minimize time until we notice a stale entry
+	 * 2. Maximize scan intervals to not waste cycles
+	 *
+	 * Normally, expired_count will be 0; this increases the next_run time
+	 * to prioritize 2) above.
+	 *
+	 * As soon as a timed-out entry is found, move towards 1) and increase
+	 * the scan frequency.
+	 * In case we have lots of evictions, the next scan is done immediately.
+	 */
 	ratio = scanned ? expired_count * 100 / scanned : 0;
-	if (ratio >= 90 || expired_count == GC_MAX_EVICTS)
+	if (ratio >= 90 || expired_count == GC_MAX_EVICTS) {
+		gc_work->next_gc_run = 0;
 		next_run = 0;
+	} else if (expired_count) {
+		gc_work->next_gc_run /= 2U;
+		next_run = msecs_to_jiffies(1);
+	} else {
+		if (gc_work->next_gc_run < GC_INTERVAL_MAX)
+			gc_work->next_gc_run += msecs_to_jiffies(1);
+
+		next_run = gc_work->next_gc_run;
+	}
 
 	gc_work->last_bucket = i;
-	schedule_delayed_work(&gc_work->dwork, next_run);
+	queue_delayed_work(system_long_wq, &gc_work->dwork, next_run);
 }
 
 static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
 {
 	INIT_DELAYED_WORK(&gc_work->dwork, gc_worker);
+	gc_work->next_gc_run = GC_INTERVAL_MAX;
 	gc_work->exiting = false;
 }
 
@@ -1885,7 +1918,7 @@ int nf_conntrack_init_start(void)
 	nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
 
 	conntrack_gc_work_init(&conntrack_gc_work);
-	schedule_delayed_work(&conntrack_gc_work.dwork, GC_INTERVAL);
+	queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, GC_INTERVAL_MAX);
 
 	return 0;
 
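An illustrative trace of how the adaptive interval behaves, assuming HZ = 1000 so msecs_to_jiffies(1) is a single jiffy:

	/* run 1:   scanned 1024, expired   0 -> next_gc_run += 1 jiffy
	 * run 2:   scanned 1024, expired   0 -> next_gc_run += 1 jiffy
	 *   ...                                 (creeps up toward 2 * HZ)
	 * run N:   scanned 1024, expired  12 -> next_gc_run /= 2,
	 *                                       reschedule in 1 jiffy
	 * run N+1: scanned  256, expired 240 -> ratio 93 >= 90,
	 *                                       reschedule immediately
	 *
	 * a mostly-idle table converges toward GC_INTERVAL_MAX, while a
	 * burst of timeouts quickly drives the scan frequency back up
	 */
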
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 336e215..7341adf 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -138,9 +138,14 @@ __nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum)
 
 	for (i = 0; i < nf_ct_helper_hsize; i++) {
 		hlist_for_each_entry_rcu(h, &nf_ct_helper_hash[i], hnode) {
-			if (!strcmp(h->name, name) &&
-			    h->tuple.src.l3num == l3num &&
-			    h->tuple.dst.protonum == protonum)
+			if (strcmp(h->name, name))
+				continue;
+
+			if (h->tuple.src.l3num != NFPROTO_UNSPEC &&
+			    h->tuple.src.l3num != l3num)
+				continue;
+
+			if (h->tuple.dst.protonum == protonum)
 				return h;
 		}
 	}
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 621b81c..c3fc14e 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1436,9 +1436,12 @@ static int process_sip_request(struct sk_buff *skb, unsigned int protoff,
 		handler = &sip_handlers[i];
 		if (handler->request == NULL)
 			continue;
-		if (*datalen < handler->len ||
+		if (*datalen < handler->len + 2 ||
 		    strncasecmp(*dptr, handler->method, handler->len))
 			continue;
+		if ((*dptr)[handler->len] != ' ' ||
+		    !isalpha((*dptr)[handler->len+1]))
+			continue;
 
 		if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ,
 				      &matchoff, &matchlen) <= 0) {
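
Effect of the stricter match, with an assumed request line: "INVITE sip:user@example.com SIP/2.0" still selects the INVITE handler (the method is followed by a space and an alphabetic character), while a hypothetical method "INVITEFOO" no longer matches the shorter "INVITE" prefix by accident:

	/* before: strncasecmp("INVITEFOO ...", "INVITE", 6) == 0 -> match
	 * after:  (*dptr)[6] == 'F', not ' '                     -> skip
	 *         ("INVITE sip:..." passes: [6] == ' ', isalpha([7]))
	 */
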
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 24db222..026581b 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2956,12 +2956,14 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
 
 	err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
 	if (err < 0)
-		goto err2;
+		goto err3;
 
 	list_add_tail_rcu(&set->list, &table->sets);
 	table->use++;
 	return 0;
 
+err3:
+	ops->destroy(set);
 err2:
 	kfree(set);
 err1:
@@ -3452,14 +3454,15 @@ void *nft_set_elem_init(const struct nft_set *set,
 	return elem;
 }
 
-void nft_set_elem_destroy(const struct nft_set *set, void *elem)
+void nft_set_elem_destroy(const struct nft_set *set, void *elem,
+			  bool destroy_expr)
 {
 	struct nft_set_ext *ext = nft_set_elem_ext(set, elem);
 
 	nft_data_uninit(nft_set_ext_key(ext), NFT_DATA_VALUE);
 	if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
 		nft_data_uninit(nft_set_ext_data(ext), set->dtype);
-	if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR))
+	if (destroy_expr && nft_set_ext_exists(ext, NFT_SET_EXT_EXPR))
 		nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext));
 
 	kfree(elem);
@@ -3565,6 +3568,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
 		dreg = nft_type_to_reg(set->dtype);
 		list_for_each_entry(binding, &set->bindings, list) {
 			struct nft_ctx bind_ctx = {
+				.net	= ctx->net,
 				.afi	= ctx->afi,
 				.table	= ctx->table,
 				.chain	= (struct nft_chain *)binding->chain,
@@ -3812,7 +3816,7 @@ void nft_set_gc_batch_release(struct rcu_head *rcu)
 
 	gcb = container_of(rcu, struct nft_set_gc_batch, head.rcu);
 	for (i = 0; i < gcb->head.cnt; i++)
-		nft_set_elem_destroy(gcb->head.set, gcb->elems[i]);
+		nft_set_elem_destroy(gcb->head.set, gcb->elems[i], true);
 	kfree(gcb);
 }
 EXPORT_SYMBOL_GPL(nft_set_gc_batch_release);
@@ -4030,7 +4034,7 @@ static void nf_tables_commit_release(struct nft_trans *trans)
 		break;
 	case NFT_MSG_DELSETELEM:
 		nft_set_elem_destroy(nft_trans_elem_set(trans),
-				     nft_trans_elem(trans).priv);
+				     nft_trans_elem(trans).priv, true);
 		break;
 	}
 	kfree(trans);
@@ -4171,7 +4175,7 @@ static void nf_tables_abort_release(struct nft_trans *trans)
 		break;
 	case NFT_MSG_NEWSETELEM:
 		nft_set_elem_destroy(nft_trans_elem_set(trans),
-				     nft_trans_elem(trans).priv);
+				     nft_trans_elem(trans).priv, true);
 		break;
 	}
 	kfree(trans);
@@ -4421,7 +4425,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
  *	Otherwise a 0 is returned and the attribute value is stored in the
  *	destination variable.
  */
-unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest)
+int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest)
 {
 	u32 val;
 
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 517f087..31ca947 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -44,18 +44,22 @@ static void *nft_dynset_new(struct nft_set *set, const struct nft_expr *expr,
 				 &regs->data[priv->sreg_key],
 				 &regs->data[priv->sreg_data],
 				 timeout, GFP_ATOMIC);
-	if (elem == NULL) {
-		if (set->size)
-			atomic_dec(&set->nelems);
-		return NULL;
-	}
+	if (elem == NULL)
+		goto err1;
 
 	ext = nft_set_elem_ext(set, elem);
 	if (priv->expr != NULL &&
 	    nft_expr_clone(nft_set_ext_expr(ext), priv->expr) < 0)
-		return NULL;
+		goto err2;
 
 	return elem;
+
+err2:
+	nft_set_elem_destroy(set, elem, false);
+err1:
+	if (set->size)
+		atomic_dec(&set->nelems);
+	return NULL;
 }
 
 static void nft_dynset_eval(const struct nft_expr *expr,
@@ -139,6 +143,9 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
 			return PTR_ERR(set);
 	}
 
+	if (set->ops->update == NULL)
+		return -EOPNOTSUPP;
+
 	if (set->flags & NFT_SET_CONSTANT)
 		return -EBUSY;
 
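Why the new check matters: nft_dynset adds elements from the packet path through set->ops->update(), and only backends implementing that op (the hash set type in this tree) can do so; the rbtree backend leaves ->update NULL, so failing init with -EOPNOTSUPP replaces what would otherwise be a NULL dereference on the first matching packet:

	/* nft_dynset_eval(), simplified:
	 *
	 *   if (set->ops->update(set, &regs->data[priv->sreg_key],
	 *                        nft_dynset_new, expr, regs, &ext)) { ... }
	 *
	 * with ->update == NULL this would oops; now init fails instead
	 */
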
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 3794cb2..a3dface 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -98,7 +98,7 @@ static bool nft_hash_update(struct nft_set *set, const u32 *key,
 			    const struct nft_set_ext **ext)
 {
 	struct nft_hash *priv = nft_set_priv(set);
-	struct nft_hash_elem *he;
+	struct nft_hash_elem *he, *prev;
 	struct nft_hash_cmp_arg arg = {
 		.genmask = NFT_GENMASK_ANY,
 		.set	 = set,
@@ -112,15 +112,24 @@ static bool nft_hash_update(struct nft_set *set, const u32 *key,
 	he = new(set, expr, regs);
 	if (he == NULL)
 		goto err1;
-	if (rhashtable_lookup_insert_key(&priv->ht, &arg, &he->node,
-					 nft_hash_params))
+
+	prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node,
+						nft_hash_params);
+	if (IS_ERR(prev))
 		goto err2;
+
+	/* Another cpu may race to insert the element with the same key */
+	if (prev) {
+		nft_set_elem_destroy(set, he, true);
+		he = prev;
+	}
+
 out:
 	*ext = &he->ext;
 	return true;
 
 err2:
-	nft_set_elem_destroy(set, he);
+	nft_set_elem_destroy(set, he, true);
 err1:
 	return false;
 }
@@ -332,7 +341,7 @@ static int nft_hash_init(const struct nft_set *set,
 
 static void nft_hash_elem_destroy(void *ptr, void *arg)
 {
-	nft_set_elem_destroy((const struct nft_set *)arg, ptr);
+	nft_set_elem_destroy((const struct nft_set *)arg, ptr, true);
 }
 
 static void nft_hash_destroy(const struct nft_set *set)
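
Semantics of the race handling above: two CPUs can call nft_hash_update() concurrently for the same missing key; both allocate an element, but rhashtable_lookup_get_insert_key() links only one in and hands the loser the winner's element:

	/* CPU0: prev == NULL       -> its own 'he' is now in the table
	 * CPU1: prev == CPU0's he  -> destroys its duplicate, uses prev
	 *
	 * either way, *ext ends up pointing at the one element actually
	 * linked into the hash table
	 */
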
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 38b5bda..36493a7 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -266,7 +266,7 @@ static void nft_rbtree_destroy(const struct nft_set *set)
 	while ((node = priv->root.rb_node) != NULL) {
 		rb_erase(node, &priv->root);
 		rbe = rb_entry(node, struct nft_rbtree_elem, node);
-		nft_set_elem_destroy(set, rbe);
+		nft_set_elem_destroy(set, rbe, true);
 	}
 }
 
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c
index 69f78e9..b83e158 100644
--- a/net/netfilter/xt_connmark.c
+++ b/net/netfilter/xt_connmark.c
@@ -44,7 +44,7 @@ connmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
 	u_int32_t newmark;
 
 	ct = nf_ct_get(skb, &ctinfo);
-	if (ct == NULL)
+	if (ct == NULL || nf_ct_is_untracked(ct))
 		return XT_CONTINUE;
 
 	switch (info->mode) {
@@ -97,7 +97,7 @@ connmark_mt(const struct sk_buff *skb, struct xt_action_param *par)
 	const struct nf_conn *ct;
 
 	ct = nf_ct_get(skb, &ctinfo);
-	if (ct == NULL)
+	if (ct == NULL || nf_ct_is_untracked(ct))
 		return false;
 
 	return ((ct->mark & info->mask) == info->mark) ^ info->invert;
diff --git a/net/netlink/diag.c b/net/netlink/diag.c
index b2f0e98..a554624 100644
--- a/net/netlink/diag.c
+++ b/net/netlink/diag.c
@@ -178,11 +178,8 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
 		}
 		cb->args[1] = i;
 	} else {
-		if (req->sdiag_protocol >= MAX_LINKS) {
-			read_unlock(&nl_table_lock);
-			rcu_read_unlock();
+		if (req->sdiag_protocol >= MAX_LINKS)
 			return -ENOENT;
-		}
 
 		err = __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num);
 	}
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 55447ae..b09d475 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -404,7 +404,7 @@ int __genl_register_family(struct genl_family *family)
 
 	err = genl_validate_assign_mc_groups(family);
 	if (err)
-		goto errout_locked;
+		goto errout_free;
 
 	list_add_tail(&family->family_list, genl_family_chain(family->id));
 	genl_unlock_all();
@@ -417,6 +417,8 @@ int __genl_register_family(struct genl_family *family)
 
 	return 0;
 
+errout_free:
+	kfree(family->attrbuf);
 errout_locked:
 	genl_unlock_all();
 errout:
diff --git a/net/sctp/input.c b/net/sctp/input.c
index a2ea1d1..a01a56e 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -181,9 +181,10 @@ int sctp_rcv(struct sk_buff *skb)
 	 * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB
 	 */
 	if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) {
-		if (asoc) {
-			sctp_association_put(asoc);
+		if (transport) {
+			sctp_transport_put(transport);
 			asoc = NULL;
+			transport = NULL;
 		} else {
 			sctp_endpoint_put(ep);
 			ep = NULL;
@@ -269,8 +270,8 @@ int sctp_rcv(struct sk_buff *skb)
 	bh_unlock_sock(sk);
 
 	/* Release the asoc/ep ref we took in the lookup calls. */
-	if (asoc)
-		sctp_association_put(asoc);
+	if (transport)
+		sctp_transport_put(transport);
 	else
 		sctp_endpoint_put(ep);
 
@@ -283,8 +284,8 @@ int sctp_rcv(struct sk_buff *skb)
 
 discard_release:
 	/* Release the asoc/ep ref we took in the lookup calls. */
-	if (asoc)
-		sctp_association_put(asoc);
+	if (transport)
+		sctp_transport_put(transport);
 	else
 		sctp_endpoint_put(ep);
 
@@ -300,6 +301,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
 	struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
+	struct sctp_transport *t = chunk->transport;
 	struct sctp_ep_common *rcvr = NULL;
 	int backloged = 0;
 
@@ -351,7 +353,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 done:
 	/* Release the refs we took in sctp_add_backlog */
 	if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
-		sctp_association_put(sctp_assoc(rcvr));
+		sctp_transport_put(t);
 	else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
 		sctp_endpoint_put(sctp_ep(rcvr));
 	else
@@ -363,6 +365,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
 	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
+	struct sctp_transport *t = chunk->transport;
 	struct sctp_ep_common *rcvr = chunk->rcvr;
 	int ret;
 
@@ -373,7 +376,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
 		 * from us
 		 */
 		if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
-			sctp_association_hold(sctp_assoc(rcvr));
+			sctp_transport_hold(t);
 		else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
 			sctp_endpoint_hold(sctp_ep(rcvr));
 		else
@@ -537,15 +540,15 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
 	return sk;
 
 out:
-	sctp_association_put(asoc);
+	sctp_transport_put(transport);
 	return NULL;
 }
 
 /* Common cleanup code for icmp/icmpv6 error handler. */
-void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
+void sctp_err_finish(struct sock *sk, struct sctp_transport *t)
 {
 	bh_unlock_sock(sk);
-	sctp_association_put(asoc);
+	sctp_transport_put(t);
 }
 
 /*
@@ -641,7 +644,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
 	}
 
 out_unlock:
-	sctp_err_finish(sk, asoc);
+	sctp_err_finish(sk, transport);
 }
 
 /*
@@ -952,11 +955,8 @@ static struct sctp_association *__sctp_lookup_association(
 		goto out;
 
 	asoc = t->asoc;
-	sctp_association_hold(asoc);
 	*pt = t;
 
-	sctp_transport_put(t);
-
 out:
 	return asoc;
 }
@@ -986,7 +986,7 @@ int sctp_has_association(struct net *net,
 	struct sctp_transport *transport;
 
 	if ((asoc = sctp_lookup_association(net, laddr, paddr, &transport))) {
-		sctp_association_put(asoc);
+		sctp_transport_put(transport);
 		return 1;
 	}
 
@@ -1021,7 +1021,6 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct net *net,
 	struct sctphdr *sh = sctp_hdr(skb);
 	union sctp_params params;
 	sctp_init_chunk_t *init;
-	struct sctp_transport *transport;
 	struct sctp_af *af;
 
 	/*
@@ -1052,7 +1051,7 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct net *net,
 
 		af->from_addr_param(paddr, params.addr, sh->source, 0);
 
-		asoc = __sctp_lookup_association(net, laddr, paddr, &transport);
+		asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
 		if (asoc)
 			return asoc;
 	}
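
The pattern behind this refactor: __sctp_lookup_association() now returns with the reference taken on the transport during lookup still held, instead of trading it for an association reference. Callers release that same reference, and since a held transport pins t->asoc, the association stays valid for as long as the caller needs it:

	/* asoc = __sctp_lookup_association(net, laddr, paddr, &t);
	 * ... use asoc and t; t's refcount keeps t->asoc alive ...
	 * sctp_transport_put(t);    /" single put releases both "/
	 */
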
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index f473779..176af30 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -198,7 +198,7 @@ static void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	}
 
 out_unlock:
-	sctp_err_finish(sk, asoc);
+	sctp_err_finish(sk, transport);
 out:
 	if (likely(idev != NULL))
 		in6_dev_put(idev);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9fbb6feb..f23ad91 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1214,9 +1214,12 @@ static int __sctp_connect(struct sock *sk,
 
 	timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
 
-	err = sctp_wait_for_connect(asoc, &timeo);
-	if ((err == 0 || err == -EINPROGRESS) && assoc_id)
+	if (assoc_id)
 		*assoc_id = asoc->assoc_id;
+	err = sctp_wait_for_connect(asoc, &timeo);
+	/* Note: the asoc may already have been freed by the time
+	 * sctp_wait_for_connect() returns.
+	 */
 
 	/* Don't free association on exit. */
 	asoc = NULL;
@@ -4282,19 +4285,18 @@ static void sctp_shutdown(struct sock *sk, int how)
 {
 	struct net *net = sock_net(sk);
 	struct sctp_endpoint *ep;
-	struct sctp_association *asoc;
 
 	if (!sctp_style(sk, TCP))
 		return;
 
-	if (how & SEND_SHUTDOWN) {
+	ep = sctp_sk(sk)->ep;
+	if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) {
+		struct sctp_association *asoc;
+
 		sk->sk_state = SCTP_SS_CLOSING;
-		ep = sctp_sk(sk)->ep;
-		if (!list_empty(&ep->asocs)) {
-			asoc = list_entry(ep->asocs.next,
-					  struct sctp_association, asocs);
-			sctp_primitive_SHUTDOWN(net, asoc, NULL);
-		}
+		asoc = list_entry(ep->asocs.next,
+				  struct sctp_association, asocs);
+		sctp_primitive_SHUTDOWN(net, asoc, NULL);
 	}
 }
 
@@ -4480,12 +4482,9 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
 	if (!transport || !sctp_transport_hold(transport))
 		goto out;
 
-	sctp_association_hold(transport->asoc);
-	sctp_transport_put(transport);
-
 	rcu_read_unlock();
 	err = cb(transport, p);
-	sctp_association_put(transport->asoc);
+	sctp_transport_put(transport);
 
 out:
 	return err;
diff --git a/net/socket.c b/net/socket.c
index 5a9bf5e..73dc69f 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -341,8 +341,23 @@ static const struct xattr_handler sockfs_xattr_handler = {
 	.get = sockfs_xattr_get,
 };
 
+static int sockfs_security_xattr_set(const struct xattr_handler *handler,
+				     struct dentry *dentry, struct inode *inode,
+				     const char *suffix, const void *value,
+				     size_t size, int flags)
+{
+	/* Handled by LSM. */
+	return -EAGAIN;
+}
+
+static const struct xattr_handler sockfs_security_xattr_handler = {
+	.prefix = XATTR_SECURITY_PREFIX,
+	.set = sockfs_security_xattr_set,
+};
+
 static const struct xattr_handler *sockfs_xattr_handlers[] = {
 	&sockfs_xattr_handler,
+	&sockfs_security_xattr_handler,
 	NULL
 };
 
@@ -2038,6 +2053,8 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
 		if (err)
 			break;
 		++datagrams;
+		if (msg_data_left(&msg_sys))
+			break;
 		cond_resched();
 	}
 
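Worked example of the early break, assuming a stream socket whose send buffer fills mid-batch: vlen = 8 and the third message moves only 4096 of its 65536 bytes:

	/* third iteration: 4096 bytes sent, datagrams becomes 3;
	 * msg_data_left(&msg_sys) == 61440, non-zero -> break
	 *
	 * __sys_sendmmsg() returns 3 with mmsg[2].msg_len == 4096, so
	 * userspace can resume the partial message instead of having a
	 * fourth message spliced in after an incomplete third
	 */
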
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index c3f6523..3bc1d61 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -1002,14 +1002,8 @@ static void svc_age_temp_xprts(unsigned long closure)
 void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
 {
 	struct svc_xprt *xprt;
-	struct svc_sock *svsk;
-	struct socket *sock;
 	struct list_head *le, *next;
 	LIST_HEAD(to_be_closed);
-	struct linger no_linger = {
-		.l_onoff = 1,
-		.l_linger = 0,
-	};
 
 	spin_lock_bh(&serv->sv_lock);
 	list_for_each_safe(le, next, &serv->sv_tempsocks) {
@@ -1027,10 +1021,7 @@ void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
 		list_del_init(le);
 		xprt = list_entry(le, struct svc_xprt, xpt_list);
 		dprintk("svc_age_temp_xprts_now: closing %p\n", xprt);
-		svsk = container_of(xprt, struct svc_sock, sk_xprt);
-		sock = svsk->sk_sock;
-		kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
-				  (char *)&no_linger, sizeof(no_linger));
+		xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
 		svc_close_xprt(xprt);
 	}
 }
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 57625f6..a4bc982 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -438,6 +438,21 @@ static int svc_tcp_has_wspace(struct svc_xprt *xprt)
 	return !test_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
 }
 
+static void svc_tcp_kill_temp_xprt(struct svc_xprt *xprt)
+{
+	struct svc_sock *svsk;
+	struct socket *sock;
+	struct linger no_linger = {
+		.l_onoff = 1,
+		.l_linger = 0,
+	};
+
+	svsk = container_of(xprt, struct svc_sock, sk_xprt);
+	sock = svsk->sk_sock;
+	kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
+			  (char *)&no_linger, sizeof(no_linger));
+}
+
 /*
  * See net/ipv6/ip_sockglue.c : ip_cmsg_recv_pktinfo
  */
@@ -648,6 +663,10 @@ static struct svc_xprt *svc_udp_accept(struct svc_xprt *xprt)
 	return NULL;
 }
 
+static void svc_udp_kill_temp_xprt(struct svc_xprt *xprt)
+{
+}
+
 static struct svc_xprt *svc_udp_create(struct svc_serv *serv,
 				       struct net *net,
 				       struct sockaddr *sa, int salen,
@@ -667,6 +686,7 @@ static struct svc_xprt_ops svc_udp_ops = {
 	.xpo_has_wspace = svc_udp_has_wspace,
 	.xpo_accept = svc_udp_accept,
 	.xpo_secure_port = svc_sock_secure_port,
+	.xpo_kill_temp_xprt = svc_udp_kill_temp_xprt,
 };
 
 static struct svc_xprt_class svc_udp_class = {
@@ -1242,6 +1262,7 @@ static struct svc_xprt_ops svc_tcp_ops = {
 	.xpo_has_wspace = svc_tcp_has_wspace,
 	.xpo_accept = svc_tcp_accept,
 	.xpo_secure_port = svc_sock_secure_port,
+	.xpo_kill_temp_xprt = svc_tcp_kill_temp_xprt,
 };
 
 static struct svc_xprt_class svc_tcp_class = {
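
Design note: svc_age_temp_xprts_now() previously cast every temporary xprt to a struct svc_sock and set SO_LINGER directly, which is bogus for non-socket transports such as RDMA. The new per-transport op lets each class pick its own teardown; TCP forces an abortive close, UDP and RDMA deliberately do nothing:

	/* generic aging path, from svc_xprt.c above: */
	xprt->xpt_ops->xpo_kill_temp_xprt(xprt);  /* TCP: SO_LINGER(0), RST */
	svc_close_xprt(xprt);
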
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 6864fb9..1334de2 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -67,6 +67,7 @@ static void svc_rdma_detach(struct svc_xprt *xprt);
 static void svc_rdma_free(struct svc_xprt *xprt);
 static int svc_rdma_has_wspace(struct svc_xprt *xprt);
 static int svc_rdma_secure_port(struct svc_rqst *);
+static void svc_rdma_kill_temp_xprt(struct svc_xprt *);
 
 static struct svc_xprt_ops svc_rdma_ops = {
 	.xpo_create = svc_rdma_create,
@@ -79,6 +80,7 @@ static struct svc_xprt_ops svc_rdma_ops = {
 	.xpo_has_wspace = svc_rdma_has_wspace,
 	.xpo_accept = svc_rdma_accept,
 	.xpo_secure_port = svc_rdma_secure_port,
+	.xpo_kill_temp_xprt = svc_rdma_kill_temp_xprt,
 };
 
 struct svc_xprt_class svc_rdma_class = {
@@ -1317,6 +1319,10 @@ static int svc_rdma_secure_port(struct svc_rqst *rqstp)
 	return 1;
 }
 
+static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt)
+{
+}
+
 int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
 {
 	struct ib_send_wr *bad_wr, *n_wr;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 145082e..5d1c14a 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2812,7 +2812,8 @@ static int unix_seq_show(struct seq_file *seq, void *v)
 				i++;
 			}
 			for ( ; i < len; i++)
-				seq_putc(seq, u->addr->name->sun_path[i]);
+				seq_putc(seq, u->addr->name->sun_path[i] ?:
+					 '@');
 		}
 		unix_state_unlock(s);
 		seq_putc(seq, '\n');
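
Example: the leading NUL of an abstract name is already rendered as '@' by the branch above this loop; the '?:' fallback covers NULs embedded further in. For a socket bound to the abstract name "\0te\0st", /proc/net/unix previously emitted a raw NUL mid-line and now prints "@te@st", matching the convention ss(1) uses:

	/* sun_path = "\0te\0st"
	 * before: '@', 't', 'e', '\0', 's', 't'   (raw NUL in the output)
	 * after:  '@', 't', 'e', '@',  's', 't'
	 */
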
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 12b7304..72c5867 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -27,6 +27,7 @@
 hostprogs-y += test_current_task_under_cgroup
 hostprogs-y += trace_event
 hostprogs-y += sampleip
+hostprogs-y += tc_l2_redirect
 
 test_verifier-objs := test_verifier.o libbpf.o
 test_maps-objs := test_maps.o libbpf.o
@@ -56,6 +57,7 @@
 				       test_current_task_under_cgroup_user.o
 trace_event-objs := bpf_load.o libbpf.o trace_event_user.o
 sampleip-objs := bpf_load.o libbpf.o sampleip_user.o
+tc_l2_redirect-objs := bpf_load.o libbpf.o tc_l2_redirect_user.o
 
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
@@ -72,6 +74,7 @@
 always += trace_output_kern.o
 always += tcbpf1_kern.o
 always += tcbpf2_kern.o
+always += tc_l2_redirect_kern.o
 always += lathist_kern.o
 always += offwaketime_kern.o
 always += spintest_kern.o
@@ -111,6 +114,7 @@
 HOSTLOADLIBES_test_current_task_under_cgroup += -lelf
 HOSTLOADLIBES_trace_event += -lelf
 HOSTLOADLIBES_sampleip += -lelf
+HOSTLOADLIBES_tc_l2_redirect += -lelf
 
 # Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline:
 #  make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
diff --git a/samples/bpf/tc_l2_redirect.sh b/samples/bpf/tc_l2_redirect.sh
new file mode 100755
index 0000000..80a0559
--- /dev/null
+++ b/samples/bpf/tc_l2_redirect.sh
@@ -0,0 +1,173 @@
+#!/bin/bash
+
+[[ -z $TC ]] && TC='tc'
+[[ -z $IP ]] && IP='ip'
+
+REDIRECT_USER='./tc_l2_redirect'
+REDIRECT_BPF='./tc_l2_redirect_kern.o'
+
+RP_FILTER=$(< /proc/sys/net/ipv4/conf/all/rp_filter)
+IPV6_FORWARDING=$(< /proc/sys/net/ipv6/conf/all/forwarding)
+
+function config_common {
+	local tun_type=$1
+
+	$IP netns add ns1
+	$IP netns add ns2
+	$IP link add ve1 type veth peer name vens1
+	$IP link add ve2 type veth peer name vens2
+	$IP link set dev ve1 up
+	$IP link set dev ve2 up
+	$IP link set dev ve1 mtu 1500
+	$IP link set dev ve2 mtu 1500
+	$IP link set dev vens1 netns ns1
+	$IP link set dev vens2 netns ns2
+
+	$IP -n ns1 link set dev lo up
+	$IP -n ns1 link set dev vens1 up
+	$IP -n ns1 addr add 10.1.1.101/24 dev vens1
+	$IP -n ns1 addr add 2401:db01::65/64 dev vens1 nodad
+	$IP -n ns1 route add default via 10.1.1.1 dev vens1
+	$IP -n ns1 route add default via 2401:db01::1 dev vens1
+
+	$IP -n ns2 link set dev lo up
+	$IP -n ns2 link set dev vens2 up
+	$IP -n ns2 addr add 10.2.1.102/24 dev vens2
+	$IP -n ns2 addr add 2401:db02::66/64 dev vens2 nodad
+	$IP -n ns2 addr add 10.10.1.102 dev lo
+	$IP -n ns2 addr add 2401:face::66/64 dev lo nodad
+	$IP -n ns2 link add ipt2 type ipip local 10.2.1.102 remote 10.2.1.1
+	$IP -n ns2 link add ip6t2 type ip6tnl mode any local 2401:db02::66 remote 2401:db02::1
+	$IP -n ns2 link set dev ipt2 up
+	$IP -n ns2 link set dev ip6t2 up
+	$IP netns exec ns2 $TC qdisc add dev vens2 clsact
+	$IP netns exec ns2 $TC filter add dev vens2 ingress bpf da obj $REDIRECT_BPF sec drop_non_tun_vip
+	if [[ $tun_type == "ipip" ]]; then
+		$IP -n ns2 route add 10.1.1.0/24 dev ipt2
+		$IP netns exec ns2 sysctl -q -w net.ipv4.conf.all.rp_filter=0
+		$IP netns exec ns2 sysctl -q -w net.ipv4.conf.ipt2.rp_filter=0
+	else
+		$IP -n ns2 route add 10.1.1.0/24 dev ip6t2
+		$IP -n ns2 route add 2401:db01::/64 dev ip6t2
+		$IP netns exec ns2 sysctl -q -w net.ipv4.conf.all.rp_filter=0
+		$IP netns exec ns2 sysctl -q -w net.ipv4.conf.ip6t2.rp_filter=0
+	fi
+
+	$IP addr add 10.1.1.1/24 dev ve1
+	$IP addr add 2401:db01::1/64 dev ve1 nodad
+	$IP addr add 10.2.1.1/24 dev ve2
+	$IP addr add 2401:db02::1/64 dev ve2 nodad
+
+	$TC qdisc add dev ve2 clsact
+	$TC filter add dev ve2 ingress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_forward
+
+	sysctl -q -w net.ipv4.conf.all.rp_filter=0
+	sysctl -q -w net.ipv6.conf.all.forwarding=1
+}
+
+function cleanup {
+	set +e
+	[[ -z $DEBUG ]] || set +x
+	$IP netns delete ns1 >& /dev/null
+	$IP netns delete ns2 >& /dev/null
+	$IP link del ve1 >& /dev/null
+	$IP link del ve2 >& /dev/null
+	$IP link del ipt >& /dev/null
+	$IP link del ip6t >& /dev/null
+	sysctl -q -w net.ipv4.conf.all.rp_filter=$RP_FILTER
+	sysctl -q -w net.ipv6.conf.all.forwarding=$IPV6_FORWARDING
+	rm -f /sys/fs/bpf/tc/globals/tun_iface
+	[[ -z $DEBUG ]] || set -x
+	set -e
+}
+
+function l2_to_ipip {
+	echo -n "l2_to_ipip $1: "
+
+	local dir=$1
+
+	config_common ipip
+
+	$IP link add ipt type ipip external
+	$IP link set dev ipt up
+	sysctl -q -w net.ipv4.conf.ipt.rp_filter=0
+	sysctl -q -w net.ipv4.conf.ipt.forwarding=1
+
+	if [[ $dir == "egress" ]]; then
+		$IP route add 10.10.1.0/24 via 10.2.1.102 dev ve2
+		$TC filter add dev ve2 egress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_redirect
+		sysctl -q -w net.ipv4.conf.ve1.forwarding=1
+	else
+		$TC qdisc add dev ve1 clsact
+		$TC filter add dev ve1 ingress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_redirect
+	fi
+
+	$REDIRECT_USER -U /sys/fs/bpf/tc/globals/tun_iface -i $(< /sys/class/net/ipt/ifindex)
+
+	$IP netns exec ns1 ping -c1 10.10.1.102 >& /dev/null
+
+	if [[ $dir == "egress" ]]; then
+		# test direct egress to ve2 (i.e. not forwarding from
+		# ve1 to ve2).
+		ping -c1 10.10.1.102 >& /dev/null
+	fi
+
+	cleanup
+
+	echo "OK"
+}
+
+function l2_to_ip6tnl {
+	echo -n "l2_to_ip6tnl $1: "
+
+	local dir=$1
+
+	config_common ip6tnl
+
+	$IP link add ip6t type ip6tnl mode any external
+	$IP link set dev ip6t up
+	sysctl -q -w net.ipv4.conf.ip6t.rp_filter=0
+	sysctl -q -w net.ipv4.conf.ip6t.forwarding=1
+
+	if [[ $dir == "egress" ]]; then
+		$IP route add 10.10.1.0/24 via 10.2.1.102 dev ve2
+		$IP route add 2401:face::/64 via 2401:db02::66 dev ve2
+		$TC filter add dev ve2 egress bpf da obj $REDIRECT_BPF sec l2_to_ip6tun_ingress_redirect
+		sysctl -q -w net.ipv4.conf.ve1.forwarding=1
+	else
+		$TC qdisc add dev ve1 clsact
+		$TC filter add dev ve1 ingress bpf da obj $REDIRECT_BPF sec l2_to_ip6tun_ingress_redirect
+	fi
+
+	$REDIRECT_USER -U /sys/fs/bpf/tc/globals/tun_iface -i $(< /sys/class/net/ip6t/ifindex)
+
+	$IP netns exec ns1 ping -c1 10.10.1.102 >& /dev/null
+	$IP netns exec ns1 ping -6 -c1 2401:face::66 >& /dev/null
+
+	if [[ $dir == "egress" ]]; then
+		# test direct egress to ve2 (i.e. not forwarding from
+		# ve1 to ve2).
+		ping -c1 10.10.1.102 >& /dev/null
+		ping -6 -c1 2401:face::66 >& /dev/null
+	fi
+
+	cleanup
+
+	echo "OK"
+}
+
+cleanup
+test_names="l2_to_ipip l2_to_ip6tnl"
+test_dirs="ingress egress"
+if [[ $# -ge 2 ]]; then
+	test_names=$1
+	test_dirs=$2
+elif [[ $# -ge 1 ]]; then
+	test_names=$1
+fi
+
+for t in $test_names; do
+	for d in $test_dirs; do
+		$t $d
+	done
+done
diff --git a/samples/bpf/tc_l2_redirect_kern.c b/samples/bpf/tc_l2_redirect_kern.c
new file mode 100644
index 0000000..92a4472
--- /dev/null
+++ b/samples/bpf/tc_l2_redirect_kern.c
@@ -0,0 +1,236 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/if_packet.h>
+#include <uapi/linux/ip.h>
+#include <uapi/linux/ipv6.h>
+#include <uapi/linux/in.h>
+#include <uapi/linux/tcp.h>
+#include <uapi/linux/filter.h>
+#include <uapi/linux/pkt_cls.h>
+#include <net/ipv6.h>
+#include "bpf_helpers.h"
+
+#define _htonl __builtin_bswap32
+
+#define PIN_GLOBAL_NS		2
+struct bpf_elf_map {
+	__u32 type;
+	__u32 size_key;
+	__u32 size_value;
+	__u32 max_elem;
+	__u32 flags;
+	__u32 id;
+	__u32 pinning;
+};
+
+/* copy of 'struct ethhdr' without __packed */
+struct eth_hdr {
+	unsigned char   h_dest[ETH_ALEN];
+	unsigned char   h_source[ETH_ALEN];
+	unsigned short  h_proto;
+};
+
+struct bpf_elf_map SEC("maps") tun_iface = {
+	.type = BPF_MAP_TYPE_ARRAY,
+	.size_key = sizeof(int),
+	.size_value = sizeof(int),
+	.pinning = PIN_GLOBAL_NS,
+	.max_elem = 1,
+};
+
+static __always_inline bool is_vip_addr(__be16 eth_proto, __be32 daddr)
+{
+	if (eth_proto == htons(ETH_P_IP))
+		return (_htonl(0xffffff00) & daddr) == _htonl(0x0a0a0100);
+	else if (eth_proto == htons(ETH_P_IPV6))
+		return (daddr == _htonl(0x2401face));
+
+	return false;
+}
+
+SEC("l2_to_iptun_ingress_forward")
+int _l2_to_iptun_ingress_forward(struct __sk_buff *skb)
+{
+	struct bpf_tunnel_key tkey = {};
+	void *data = (void *)(long)skb->data;
+	struct eth_hdr *eth = data;
+	void *data_end = (void *)(long)skb->data_end;
+	int key = 0, *ifindex;
+
+	int ret;
+
+	if (data + sizeof(*eth) > data_end)
+		return TC_ACT_OK;
+
+	ifindex = bpf_map_lookup_elem(&tun_iface, &key);
+	if (!ifindex)
+		return TC_ACT_OK;
+
+	if (eth->h_proto == htons(ETH_P_IP)) {
+		char fmt4[] = "ingress forward to ifindex:%d daddr4:%x\n";
+		struct iphdr *iph = data + sizeof(*eth);
+
+		if (data + sizeof(*eth) + sizeof(*iph) > data_end)
+			return TC_ACT_OK;
+
+		if (iph->protocol != IPPROTO_IPIP)
+			return TC_ACT_OK;
+
+		bpf_trace_printk(fmt4, sizeof(fmt4), *ifindex,
+				 _htonl(iph->daddr));
+		return bpf_redirect(*ifindex, BPF_F_INGRESS);
+	} else if (eth->h_proto == htons(ETH_P_IPV6)) {
+		char fmt6[] = "ingress forward to ifindex:%d daddr6:%x::%x\n";
+		struct ipv6hdr *ip6h = data + sizeof(*eth);
+
+		if (data + sizeof(*eth) + sizeof(*ip6h) > data_end)
+			return TC_ACT_OK;
+
+		if (ip6h->nexthdr != IPPROTO_IPIP &&
+		    ip6h->nexthdr != IPPROTO_IPV6)
+			return TC_ACT_OK;
+
+		bpf_trace_printk(fmt6, sizeof(fmt6), *ifindex,
+				 _htonl(ip6h->daddr.s6_addr32[0]),
+				 _htonl(ip6h->daddr.s6_addr32[3]));
+		return bpf_redirect(*ifindex, BPF_F_INGRESS);
+	}
+
+	return TC_ACT_OK;
+}
+
+SEC("l2_to_iptun_ingress_redirect")
+int _l2_to_iptun_ingress_redirect(struct __sk_buff *skb)
+{
+	struct bpf_tunnel_key tkey = {};
+	void *data = (void *)(long)skb->data;
+	struct eth_hdr *eth = data;
+	void *data_end = (void *)(long)skb->data_end;
+	int key = 0, *ifindex;
+
+	int ret;
+
+	if (data + sizeof(*eth) > data_end)
+		return TC_ACT_OK;
+
+	ifindex = bpf_map_lookup_elem(&tun_iface, &key);
+	if (!ifindex)
+		return TC_ACT_OK;
+
+	if (eth->h_proto == htons(ETH_P_IP)) {
+		char fmt4[] = "e/ingress redirect daddr4:%x to ifindex:%d\n";
+		struct iphdr *iph = data + sizeof(*eth);
+		__be32 daddr = iph->daddr;
+
+		if (data + sizeof(*eth) + sizeof(*iph) > data_end)
+			return TC_ACT_OK;
+
+		if (!is_vip_addr(eth->h_proto, daddr))
+			return TC_ACT_OK;
+
+		bpf_trace_printk(fmt4, sizeof(fmt4), _htonl(daddr), *ifindex);
+	} else {
+		return TC_ACT_OK;
+	}
+
+	tkey.tunnel_id = 10000;
+	tkey.tunnel_ttl = 64;
+	tkey.remote_ipv4 = 0x0a020166; /* 10.2.1.102 */
+	bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), 0);
+	return bpf_redirect(*ifindex, 0);
+}
+
+SEC("l2_to_ip6tun_ingress_redirect")
+int _l2_to_ip6tun_ingress_redirect(struct __sk_buff *skb)
+{
+	struct bpf_tunnel_key tkey = {};
+	void *data = (void *)(long)skb->data;
+	struct eth_hdr *eth = data;
+	void *data_end = (void *)(long)skb->data_end;
+	int key = 0, *ifindex;
+
+	if (data + sizeof(*eth) > data_end)
+		return TC_ACT_OK;
+
+	ifindex = bpf_map_lookup_elem(&tun_iface, &key);
+	if (!ifindex)
+		return TC_ACT_OK;
+
+	if (eth->h_proto == htons(ETH_P_IP)) {
+		char fmt4[] = "e/ingress redirect daddr4:%x to ifindex:%d\n";
+		struct iphdr *iph = data + sizeof(*eth);
+
+		if (data + sizeof(*eth) + sizeof(*iph) > data_end)
+			return TC_ACT_OK;
+
+		if (!is_vip_addr(eth->h_proto, iph->daddr))
+			return TC_ACT_OK;
+
+		bpf_trace_printk(fmt4, sizeof(fmt4), _htonl(iph->daddr),
+				 *ifindex);
+	} else if (eth->h_proto == htons(ETH_P_IPV6)) {
+		char fmt6[] = "e/ingress redirect daddr6:%x to ifindex:%d\n";
+		struct ipv6hdr *ip6h = data + sizeof(*eth);
+
+		if (data + sizeof(*eth) + sizeof(*ip6h) > data_end)
+			return TC_ACT_OK;
+
+		if (!is_vip_addr(eth->h_proto, ip6h->daddr.s6_addr32[0]))
+			return TC_ACT_OK;
+
+		bpf_trace_printk(fmt6, sizeof(fmt6),
+				 _htonl(ip6h->daddr.s6_addr32[0]), *ifindex);
+	} else {
+		return TC_ACT_OK;
+	}
+
+	tkey.tunnel_id = 10000;
+	tkey.tunnel_ttl = 64;
+	/* 2401:db02:0:0:0:0:0:66 */
+	tkey.remote_ipv6[0] = _htonl(0x2401db02);
+	tkey.remote_ipv6[1] = 0;
+	tkey.remote_ipv6[2] = 0;
+	tkey.remote_ipv6[3] = _htonl(0x00000066);
+	bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), BPF_F_TUNINFO_IPV6);
+	return bpf_redirect(*ifindex, 0);
+}
+
+SEC("drop_non_tun_vip")
+int _drop_non_tun_vip(struct __sk_buff *skb)
+{
+	struct bpf_tunnel_key tkey = {};
+	void *data = (void *)(long)skb->data;
+	struct eth_hdr *eth = data;
+	void *data_end = (void *)(long)skb->data_end;
+
+	if (data + sizeof(*eth) > data_end)
+		return TC_ACT_OK;
+
+	if (eth->h_proto == htons(ETH_P_IP)) {
+		struct iphdr *iph = data + sizeof(*eth);
+
+		if (data + sizeof(*eth) + sizeof(*iph) > data_end)
+			return TC_ACT_OK;
+
+		if (is_vip_addr(eth->h_proto, iph->daddr))
+			return TC_ACT_SHOT;
+	} else if (eth->h_proto == htons(ETH_P_IPV6)) {
+		struct ipv6hdr *ip6h = data + sizeof(*eth);
+
+		if (data + sizeof(*eth) + sizeof(*ip6h) > data_end)
+			return TC_ACT_OK;
+
+		if (is_vip_addr(eth->h_proto, ip6h->daddr.s6_addr32[0]))
+			return TC_ACT_SHOT;
+	}
+
+	return TC_ACT_OK;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/tc_l2_redirect_user.c b/samples/bpf/tc_l2_redirect_user.c
new file mode 100644
index 0000000..4013c53
--- /dev/null
+++ b/samples/bpf/tc_l2_redirect_user.c
@@ -0,0 +1,73 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/unistd.h>
+#include <linux/bpf.h>
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+
+#include "libbpf.h"
+
+static void usage(void)
+{
+	printf("Usage: tc_l2_ipip_redirect [...]\n");
+	printf("       -U <file>   Update an already pinned BPF array\n");
+	printf("       -i <ifindex> Interface index\n");
+	printf("       -h          Display this help\n");
+}
+
+int main(int argc, char **argv)
+{
+	const char *pinned_file = NULL;
+	int ifindex = -1;
+	int array_key = 0;
+	int array_fd = -1;
+	int ret = -1;
+	int opt;
+
+	while ((opt = getopt(argc, argv, "U:i:")) != -1) {
+		switch (opt) {
+		/* General args */
+		case 'U':
+			pinned_file = optarg;
+			break;
+		case 'i':
+			ifindex = atoi(optarg);
+			break;
+		default:
+			usage();
+			goto out;
+		}
+	}
+
+	if (ifindex < 0 || !pinned_file) {
+		usage();
+		goto out;
+	}
+
+	array_fd = bpf_obj_get(pinned_file);
+	if (array_fd < 0) {
+		fprintf(stderr, "bpf_obj_get(%s): %s(%d)\n",
+			pinned_file, strerror(errno), errno);
+		goto out;
+	}
+
+	/* bpf_tunnel_key.remote_ipv4 expects host byte order */
+	ret = bpf_update_elem(array_fd, &array_key, &ifindex, 0);
+	if (ret) {
+		perror("bpf_update_elem");
+		goto out;
+	}
+
+out:
+	if (array_fd != -1)
+		close(array_fd);
+	return ret;
+}
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index de46ab0..7675d11 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -159,7 +159,8 @@
 $(obj)/%.i: $(src)/%.c FORCE
 	$(call if_changed_dep,cpp_i_c)
 
-cmd_gensymtypes =                                                           \
+# These mirror gensymtypes_S and friends below; keep them in sync.
+cmd_gensymtypes_c =                                                         \
     $(CPP) -D__GENKSYMS__ $(c_flags) $< |                                   \
     $(GENKSYMS) $(if $(1), -T $(2))                                         \
      $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX))             \
@@ -169,7 +170,7 @@
 quiet_cmd_cc_symtypes_c = SYM $(quiet_modtag) $@
 cmd_cc_symtypes_c =                                                         \
     set -e;                                                                 \
-    $(call cmd_gensymtypes,true,$@) >/dev/null;                             \
+    $(call cmd_gensymtypes_c,true,$@) >/dev/null;                           \
     test -s $@ || rm -f $@
 
 $(obj)/%.symtypes : $(src)/%.c FORCE
@@ -198,9 +199,10 @@
 #   the actual value of the checksum generated by genksyms
 
 cmd_cc_o_c = $(CC) $(c_flags) -c -o $(@D)/.tmp_$(@F) $<
-cmd_modversions =								\
+
+cmd_modversions_c =								\
 	if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then		\
-		$(call cmd_gensymtypes,$(KBUILD_SYMTYPES),$(@:.o=.symtypes))	\
+		$(call cmd_gensymtypes_c,$(KBUILD_SYMTYPES),$(@:.o=.symtypes))	\
 		    > $(@D)/.tmp_$(@F:.o=.ver);					\
 										\
 		$(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) 			\
@@ -268,13 +270,14 @@
 define rule_cc_o_c
 	$(call echo-cmd,checksrc) $(cmd_checksrc)			  \
 	$(call cmd_and_fixdep,cc_o_c)					  \
-	$(cmd_modversions)						  \
+	$(cmd_modversions_c)						  \
 	$(cmd_objtool)						          \
 	$(call echo-cmd,record_mcount) $(cmd_record_mcount)
 endef
 
 define rule_as_o_S
 	$(call cmd_and_fixdep,as_o_S)					  \
+	$(cmd_modversions_S)						  \
 	$(cmd_objtool)
 endef
 
@@ -314,6 +317,39 @@
 $(real-objs-m)      : modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE)
 $(real-objs-m:.o=.s): modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE)
 
+# .S file exports must have their C prototypes defined in asm/asm-prototypes.h
+# or a file that it includes, in order to get versioned symbols. We build a
+# dummy C file that includes asm-prototypes and the EXPORT_SYMBOL lines from
+# the .S file (with a trailing ';'), and run genksyms on that to extract versions.
+#
+# This is convoluted. The .S file must first be preprocessed to run guards and
+# expand names, then the resulting exports must be constructed into plain
+# EXPORT_SYMBOL(symbol); to build our dummy C file, and that gets preprocessed
+# to make the genksyms input.
+#
+# These mirror gensymtypes_c and friends above; keep them in sync.
+cmd_gensymtypes_S =                                                         \
+    (echo "\#include <linux/kernel.h>" ;                                    \
+     echo "\#include <asm/asm-prototypes.h>" ;                              \
+    $(CPP) $(a_flags) $< |                                                  \
+     grep "\<___EXPORT_SYMBOL\>" |                                          \
+     sed 's/.*___EXPORT_SYMBOL[[:space:]]*\([a-zA-Z0-9_]*\)[[:space:]]*,.*/EXPORT_SYMBOL(\1);/' ) | \
+    $(CPP) -D__GENKSYMS__ $(c_flags) -xc - |                                \
+    $(GENKSYMS) $(if $(1), -T $(2))                                         \
+     $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX))             \
+     $(if $(KBUILD_PRESERVE),-p)                                            \
+     -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null))
+
+quiet_cmd_cc_symtypes_S = SYM $(quiet_modtag) $@
+cmd_cc_symtypes_S =                                                         \
+    set -e;                                                                 \
+    $(call cmd_gensymtypes_S,true,$@) >/dev/null;                           \
+    test -s $@ || rm -f $@
+
+$(obj)/%.symtypes : $(src)/%.S FORCE
+	$(call cmd,cc_symtypes_S)
+
+
 quiet_cmd_cpp_s_S = CPP $(quiet_modtag) $@
 cmd_cpp_s_S       = $(CPP) $(a_flags) -o $@ $<
 
@@ -321,7 +357,37 @@
 	$(call if_changed_dep,cpp_s_S)
 
 quiet_cmd_as_o_S = AS $(quiet_modtag)  $@
-cmd_as_o_S       = $(CC) $(a_flags) -c -o $@ $<
+
+ifndef CONFIG_MODVERSIONS
+cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $<
+
+else
+
+ASM_PROTOTYPES := $(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/asm-prototypes.h)
+
+ifeq ($(ASM_PROTOTYPES),)
+cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $<
+
+else
+
+# Versioning matches the C process described above, with the difference that
+# we parse the asm-prototypes.h C header to get the function prototypes.
+
+cmd_as_o_S = $(CC) $(a_flags) -c -o $(@D)/.tmp_$(@F) $<
+
+cmd_modversions_S =								\
+	if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then		\
+		$(call cmd_gensymtypes_S,$(KBUILD_SYMTYPES),$(@:.o=.symtypes))	\
+		    > $(@D)/.tmp_$(@F:.o=.ver);					\
+										\
+		$(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) 			\
+			-T $(@D)/.tmp_$(@F:.o=.ver);				\
+		rm -f $(@D)/.tmp_$(@F) $(@D)/.tmp_$(@F:.o=.ver);		\
+	else									\
+		mv -f $(@D)/.tmp_$(@F) $@;					\
+	fi;
+endif
+endif
 
 $(obj)/%.o: $(src)/%.S $(objtool_obj) FORCE
 	$(call if_changed_rule,as_o_S)
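
What cmd_gensymtypes_S feeds genksyms, for a hypothetical arch/x86/lib/memcpy_64.S exporting memcpy: after the grep/sed stage the dummy translation unit is effectively

	#include <linux/kernel.h>
	#include <asm/asm-prototypes.h>	/* supplies the memcpy prototype */
	EXPORT_SYMBOL(memcpy);

which, once run through $(CPP) -D__GENKSYMS__, gives genksyms a typed prototype it can compute a CRC from for the asm-exported symbol.
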
@@ -430,6 +496,9 @@
 
 $(obj)/lib-ksyms.o: $(lib-target) FORCE
 	$(call if_changed,export_list)
+
+targets += $(obj)/lib-ksyms.o
+
 endif
 
 #
diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh
index 973e8c1..17867e7 100755
--- a/scripts/gcc-x86_64-has-stack-protector.sh
+++ b/scripts/gcc-x86_64-has-stack-protector.sh
@@ -1,6 +1,6 @@
 #!/bin/sh
 
-echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
 if [ "$?" -eq "0" ] ; then
 	echo y
 else
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 2f909dd..ea81c08 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6907,8 +6907,6 @@ static const struct hda_fixup alc662_fixups[] = {
 		.v.pins = (const struct hda_pintbl[]) {
 			{ 0x15, 0x40f000f0 }, /* disabled */
 			{ 0x16, 0x40f000f0 }, /* disabled */
-			{ 0x18, 0x01014011 }, /* LO */
-			{ 0x1a, 0x01014012 }, /* LO */
 			{ }
 		}
 	},
diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
index 6a23302..4d9d320 100644
--- a/sound/pci/hda/thinkpad_helper.c
+++ b/sound/pci/hda/thinkpad_helper.c
@@ -13,7 +13,8 @@ static void (*old_vmaster_hook)(void *, int);
 static bool is_thinkpad(struct hda_codec *codec)
 {
 	return (codec->core.subsystem_id >> 16 == 0x17aa) &&
-	       (acpi_dev_found("LEN0068") || acpi_dev_found("IBM0068"));
+	       (acpi_dev_found("LEN0068") || acpi_dev_found("LEN0268") ||
+		acpi_dev_found("IBM0068"));
 }
 
 static void update_tpacpi_mute_led(void *private_data, int enabled)
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
index 07000f5..b392e51 100644
--- a/sound/soc/qcom/lpass-platform.c
+++ b/sound/soc/qcom/lpass-platform.c
@@ -75,6 +75,7 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream)
 	data->i2s_port = cpu_dai->driver->id;
 	runtime->private_data = data;
 
+	dma_ch = 0;
 	if (v->alloc_dma_channel)
 		dma_ch = v->alloc_dma_channel(drvdata, dir);
 	if (dma_ch < 0)
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 9e5276d..2ddc034 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -315,7 +315,8 @@ static int snd_usb_audio_free(struct snd_usb_audio *chip)
 		snd_usb_endpoint_free(ep);
 
 	mutex_destroy(&chip->mutex);
-	dev_set_drvdata(&chip->dev->dev, NULL);
+	if (!atomic_read(&chip->shutdown))
+		dev_set_drvdata(&chip->dev->dev, NULL);
 	kfree(chip);
 	return 0;
 }
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 4ffff7b..a53fef0 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -1337,8 +1337,8 @@ static int hist_browser__show_hierarchy_entry(struct hist_browser *browser,
 		}
 
 		if (first) {
-			ui_browser__printf(&browser->b, "%c", folded_sign);
-			width--;
+			ui_browser__printf(&browser->b, "%c ", folded_sign);
+			width -= 2;
 			first = false;
 		} else {
 			ui_browser__printf(&browser->b, "  ");
@@ -1361,8 +1361,10 @@ static int hist_browser__show_hierarchy_entry(struct hist_browser *browser,
 		width -= hpp.buf - s;
 	}
 
-	ui_browser__write_nstring(&browser->b, "", hierarchy_indent);
-	width -= hierarchy_indent;
+	if (!first) {
+		ui_browser__write_nstring(&browser->b, "", hierarchy_indent);
+		width -= hierarchy_indent;
+	}
 
 	if (column >= browser->b.horiz_scroll) {
 		char s[2048];
@@ -1381,7 +1383,13 @@ static int hist_browser__show_hierarchy_entry(struct hist_browser *browser,
 		}
 
 		perf_hpp_list__for_each_format(entry->hpp_list, fmt) {
-			ui_browser__write_nstring(&browser->b, "", 2);
+			if (first) {
+				ui_browser__printf(&browser->b, "%c ", folded_sign);
+				first = false;
+			} else {
+				ui_browser__write_nstring(&browser->b, "", 2);
+			}
+
 			width -= 2;
 
 			/*
@@ -1555,10 +1563,11 @@ static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *brows
 	int indent = hists->nr_hpp_node - 2;
 	bool first_node, first_col;
 
-	ret = scnprintf(buf, size, " ");
+	ret = scnprintf(buf, size, "  ");
 	if (advance_hpp_check(&dummy_hpp, ret))
 		return ret;
 
+	first_node = true;
 	/* the first hpp_list_node is for overhead columns */
 	fmt_node = list_first_entry(&hists->hpp_formats,
 				    struct perf_hpp_list_node, list);
@@ -1573,12 +1582,16 @@ static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *brows
 		ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, "  ");
 		if (advance_hpp_check(&dummy_hpp, ret))
 			break;
+
+		first_node = false;
 	}
 
-	ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, "%*s",
-			indent * HIERARCHY_INDENT, "");
-	if (advance_hpp_check(&dummy_hpp, ret))
-		return ret;
+	if (!first_node) {
+		ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, "%*s",
+				indent * HIERARCHY_INDENT, "");
+		if (advance_hpp_check(&dummy_hpp, ret))
+			return ret;
+	}
 
 	first_node = true;
 	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
@@ -2076,8 +2089,21 @@ void hist_browser__init(struct hist_browser *browser,
 	browser->b.use_navkeypressed	= true;
 	browser->show_headers		= symbol_conf.show_hist_headers;
 
-	hists__for_each_format(hists, fmt)
+	if (symbol_conf.report_hierarchy) {
+		struct perf_hpp_list_node *fmt_node;
+
+		/* count overhead columns (in the first node) */
+		fmt_node = list_first_entry(&hists->hpp_formats,
+					    struct perf_hpp_list_node, list);
+		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt)
+			++browser->b.columns;
+
+		/* add a single column for whole hierarchy sort keys */
 		++browser->b.columns;
+	} else {
+		hists__for_each_format(hists, fmt)
+			++browser->b.columns;
+	}
 
 	hists__reset_column_width(hists);
 }
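
The counting now mirrors what the drawing code above renders: in hierarchy
mode the browser shows the overhead columns of the first format node plus a
single combined column for all sort keys, rather than one column per
format. A toy model of the two modes (counts illustrative):

    #include <stdio.h>

    int main(void)
    {
        int overhead_fmts = 2; /* e.g. Overhead, Overhead (sys) */
        int sort_key_fmts = 3; /* e.g. comm, dso, symbol */
        int hierarchy = 1;

        int columns = hierarchy ? overhead_fmts + 1
                                : overhead_fmts + sort_key_fmts;

        printf("browser columns: %d\n", columns);
        return 0;
    }
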
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index b02992e..a69f027 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1600,18 +1600,18 @@ static void hists__hierarchy_output_resort(struct hists *hists,
 		if (prog)
 			ui_progress__update(prog, 1);
 
+		hists->nr_entries++;
+		if (!he->filtered) {
+			hists->nr_non_filtered_entries++;
+			hists__calc_col_len(hists, he);
+		}
+
 		if (!he->leaf) {
 			hists__hierarchy_output_resort(hists, prog,
 						       &he->hroot_in,
 						       &he->hroot_out,
 						       min_callchain_hits,
 						       use_callchain);
-			hists->nr_entries++;
-			if (!he->filtered) {
-				hists->nr_non_filtered_entries++;
-				hists__calc_col_len(hists, he);
-			}
-
 			continue;
 		}
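
Before this hunk, only non-leaf entries bumped nr_entries, so hierarchies
were undercounted at their leaves; hoisting the accounting above the leaf
check counts every entry exactly once. A compact sketch of the corrected
recursion (simplified types, hypothetical names):

    struct entry {
        int leaf;
        int filtered;
        struct entry *children; /* sub-hierarchy; NULL for leaves */
        struct entry *next;
    };

    static void output_resort(struct entry *e, int *nr_entries,
                              int *nr_non_filtered)
    {
        for (; e; e = e->next) {
            /* account for every entry, leaf or not */
            (*nr_entries)++;
            if (!e->filtered)
                (*nr_non_filtered)++;

            if (!e->leaf) {
                output_resort(e->children, nr_entries, nr_non_filtered);
                continue;
            }
            /* leaf-only post-processing would go here */
        }
    }

    int main(void)
    {
        struct entry leaves[2] = {
            { .leaf = 1, .next = &leaves[1] },
            { .leaf = 1 },
        };
        struct entry root = { .leaf = 0, .children = &leaves[0] };
        int nr = 0, nr_vis = 0;

        output_resort(&root, &nr, &nr_vis);
        return (nr == 3 && nr_vis == 3) ? 0 : 1;
    }
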
 
diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config
index a538ff4..a1883bb 100644
--- a/tools/power/acpi/Makefile.config
+++ b/tools/power/acpi/Makefile.config
@@ -8,18 +8,19 @@
 # as published by the Free Software Foundation; version 2
 # of the License.
 
-include ../../../../scripts/Makefile.include
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(shell pwd)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+#$(info Determined 'srctree' to be $(srctree))
+endif
 
-OUTPUT=./
+include $(srctree)/../../scripts/Makefile.include
+
+OUTPUT=$(srctree)/
 ifeq ("$(origin O)", "command line")
-	OUTPUT := $(O)/
+	OUTPUT := $(O)/power/acpi/
 endif
-
-ifneq ($(OUTPUT),)
-# check that the output directory actually exists
-OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
-$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
-endif
+#$(info Determined 'OUTPUT' to be $(OUTPUT))
 
 # --- CONFIGURATION BEGIN ---
 
@@ -70,8 +71,8 @@
 WARNINGS += $(call cc-supports,-Wstrict-prototypes)
 WARNINGS += $(call cc-supports,-Wdeclaration-after-statement)
 
-KERNEL_INCLUDE := ../../../include
-ACPICA_INCLUDE := ../../../drivers/acpi/acpica
+KERNEL_INCLUDE := $(OUTPUT)include
+ACPICA_INCLUDE := $(srctree)/../../../drivers/acpi/acpica
 CFLAGS += -D_LINUX -I$(KERNEL_INCLUDE) -I$(ACPICA_INCLUDE)
 CFLAGS += $(WARNINGS)
 
diff --git a/tools/power/acpi/Makefile.rules b/tools/power/acpi/Makefile.rules
index ec87a9e..3737383 100644
--- a/tools/power/acpi/Makefile.rules
+++ b/tools/power/acpi/Makefile.rules
@@ -8,28 +8,42 @@
 # as published by the Free Software Foundation; version 2
 # of the License.
 
-$(OUTPUT)$(TOOL): $(TOOL_OBJS) FORCE
-	$(ECHO) "  LD      " $@
-	$(QUIET) $(LD) $(CFLAGS) $(LDFLAGS) $(TOOL_OBJS) -L$(OUTPUT) -o $@
+objdir := $(OUTPUT)tools/$(TOOL)/
+toolobjs := $(addprefix $(objdir),$(TOOL_OBJS))
+$(OUTPUT)$(TOOL): $(toolobjs) FORCE
+	$(ECHO) "  LD      " $(subst $(OUTPUT),,$@)
+	$(QUIET) $(LD) $(CFLAGS) $(LDFLAGS) $(toolobjs) -L$(OUTPUT) -o $@
+	$(ECHO) "  STRIP   " $(subst $(OUTPUT),,$@)
 	$(QUIET) $(STRIPCMD) $@
 
-$(OUTPUT)%.o: %.c
-	$(ECHO) "  CC      " $@
+$(KERNEL_INCLUDE):
+	$(ECHO) "  MKDIR   " $(subst $(OUTPUT),,$@)
+	$(QUIET) mkdir -p $(KERNEL_INCLUDE)
+	$(ECHO) "  CP      " $(subst $(OUTPUT),,$@)
+	$(QUIET) cp -rf $(srctree)/../../../include/acpi $(KERNEL_INCLUDE)/
+
+$(objdir)%.o: %.c $(KERNEL_INCLUDE)
+	$(ECHO) "  CC      " $(subst $(OUTPUT),,$@)
 	$(QUIET) $(CC) -c $(CFLAGS) -o $@ $<
 
 all: $(OUTPUT)$(TOOL)
 clean:
-	-find $(OUTPUT) \( -not -type d \) \
-	-and \( -name '*~' -o -name '*.[oas]' \) \
-	-type f -print \
-	 | xargs rm -f
-	-rm -f $(OUTPUT)$(TOOL)
+	$(ECHO) "  RMOBJ   " $(subst $(OUTPUT),,$(objdir))
+	$(QUIET) find $(objdir) \( -not -type d \)\
+		 -and \( -name '*~' -o -name '*.[oas]' \)\
+		 -type f -print | xargs rm -f
+	$(ECHO) "  RM      " $(TOOL)
+	$(QUIET) rm -f $(OUTPUT)$(TOOL)
+	$(ECHO) "  RMINC   " $(subst $(OUTPUT),,$(KERNEL_INCLUDE))
+	$(QUIET) rm -rf $(KERNEL_INCLUDE)
 
 install-tools:
-	$(INSTALL) -d $(DESTDIR)${sbindir}
-	$(INSTALL_PROGRAM) $(OUTPUT)$(TOOL) $(DESTDIR)${sbindir}
+	$(ECHO) "  INST    " $(TOOL)
+	$(QUIET) $(INSTALL) -d $(DESTDIR)$(sbindir)
+	$(QUIET) $(INSTALL_PROGRAM) $(OUTPUT)$(TOOL) $(DESTDIR)$(sbindir)
 uninstall-tools:
-	- rm -f $(DESTDIR)${sbindir}/$(TOOL)
+	$(ECHO) "  UNINST  " $(TOOL)
+	$(QUIET) rm -f $(DESTDIR)$(sbindir)/$(TOOL)
 
 install: all install-tools $(EXTRA_INSTALL)
 uninstall: uninstall-tools $(EXTRA_UNINSTALL)
diff --git a/tools/power/acpi/tools/acpidbg/Makefile b/tools/power/acpi/tools/acpidbg/Makefile
index 352df4b..f2d06e7 100644
--- a/tools/power/acpi/tools/acpidbg/Makefile
+++ b/tools/power/acpi/tools/acpidbg/Makefile
@@ -17,9 +17,7 @@
 	../../os_specific/service_layers\
 	.
 CFLAGS += -DACPI_APPLICATION -DACPI_SINGLE_THREAD -DACPI_DEBUGGER\
-	-I.\
-	-I../../../../../drivers/acpi/acpica\
-	-I../../../../../include
+	-I.
 LDFLAGS += -lpthread
 TOOL_OBJS = \
 	acpidbg.o
diff --git a/tools/power/acpi/tools/acpidbg/acpidbg.c b/tools/power/acpi/tools/acpidbg/acpidbg.c
index a88ac45..4308362 100644
--- a/tools/power/acpi/tools/acpidbg/acpidbg.c
+++ b/tools/power/acpi/tools/acpidbg/acpidbg.c
@@ -12,10 +12,16 @@
 #include <acpi/acpi.h>
 
 /* Headers not included by include/acpi/platform/aclinux.h */
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <error.h>
 #include <stdbool.h>
 #include <fcntl.h>
 #include <assert.h>
-#include <linux/circ_buf.h>
+#include <sys/select.h>
+#include "../../../../../include/linux/circ_buf.h"
 
 #define ACPI_AML_FILE		"/sys/kernel/debug/acpi/acpidbg"
 #define ACPI_AML_SEC_TICK	1
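
The relative include keeps acpidbg on the kernel's own power-of-two ring
helpers for shuttling bytes between the terminal and the debugger. For
reference, the two macros it wants from include/linux/circ_buf.h, in a
stand-alone usage sketch:

    #include <stdio.h>

    /* As defined in include/linux/circ_buf.h: */
    #define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
    #define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

    #define RING_SIZE 16 /* the CIRC_* helpers require a power of two */

    int main(void)
    {
        int head = 5, tail = 1;

        printf("queued: %d, free: %d\n",
               CIRC_CNT(head, tail, RING_SIZE),
               CIRC_SPACE(head, tail, RING_SIZE));
        return 0;
    }
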
diff --git a/tools/power/acpi/tools/acpidump/Makefile b/tools/power/acpi/tools/acpidump/Makefile
index 04b5db7..f7c7af1 100644
--- a/tools/power/acpi/tools/acpidump/Makefile
+++ b/tools/power/acpi/tools/acpidump/Makefile
@@ -19,9 +19,7 @@
 	./\
 	../../common\
 	../../os_specific/service_layers
-CFLAGS += -DACPI_DUMP_APP -I.\
-	-I../../../../../drivers/acpi/acpica\
-	-I../../../../../include
+CFLAGS += -DACPI_DUMP_APP -I.
 TOOL_OBJS = \
 	apdump.o\
 	apfiles.o\
@@ -49,7 +47,9 @@
 
 include ../../Makefile.rules
 
-install-man: ../../man/acpidump.8
-	$(INSTALL_DATA) -D $< $(DESTDIR)${mandir}/man8/acpidump.8
+install-man: $(srctree)/man/acpidump.8
+	$(ECHO) "  INST    " acpidump.8
+	$(QUIET) $(INSTALL_DATA) -D $< $(DESTDIR)$(mandir)/man8/acpidump.8
 uninstall-man:
-	- rm -f $(DESTDIR)${mandir}/man8/acpidump.8
+	$(ECHO) "  UNINST  " acpidump.8
+	$(QUIET) rm -f $(DESTDIR)$(mandir)/man8/acpidump.8
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index 6e9c40e..69ccce3 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -305,7 +305,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
 			continue;
 		type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
 		       & ARMV8_PMU_EVTYPE_EVENT;
-		if ((type == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
+		if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
 		    && (enable & BIT(i))) {
 			reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
 			reg = lower_32_bits(reg);
@@ -379,7 +379,8 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 	eventsel = data & ARMV8_PMU_EVTYPE_EVENT;
 
 	/* Software increment event doesn't need to be backed by a perf event */
-	if (eventsel == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
+	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
+	    select_idx != ARMV8_PMU_CYCLE_IDX)
 		return;
 
 	memset(&attr, 0, sizeof(struct perf_event_attr));
@@ -391,7 +392,8 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
 	attr.exclude_hv = 1; /* Don't count EL2 events */
 	attr.exclude_host = 1; /* Don't count host events */
-	attr.config = eventsel;
+	attr.config = (select_idx == ARMV8_PMU_CYCLE_IDX) ?
+		ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;
 
 	counter = kvm_pmu_get_counter_value(vcpu, select_idx);
 	/* The initial sample period (overflow count) of an event. */
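
Both hunks special-case the cycle counter: SW_INCR is event number 0x0,
which is also what a guest may write for the dedicated cycle counter, yet
that counter always counts cycles, so the SW_INCR short-circuit must not
fire for it and its backing perf event must be programmed as CPU_CYCLES
(0x11). A toy version of the selection logic, with plain macros standing in
for the ARMv8 PMU definitions:

    #include <stdio.h>

    #define PMU_CYCLE_IDX       31   /* the dedicated cycle counter */
    #define PERFCTR_SW_INCR     0x00
    #define PERFCTR_CPU_CYCLES  0x11

    /* Returns -1 when no perf event is needed, else the event to program. */
    static int pick_config(int select_idx, int eventsel)
    {
        if (eventsel == PERFCTR_SW_INCR && select_idx != PMU_CYCLE_IDX)
            return -1;

        return select_idx == PMU_CYCLE_IDX ? PERFCTR_CPU_CYCLES : eventsel;
    }

    int main(void)
    {
        printf("%d\n", pick_config(PMU_CYCLE_IDX, 0)); /* 17: cycles */
        printf("%d\n", pick_config(3, 0));             /* -1: sw incr */
        return 0;
    }
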
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 8035cc1..efeceb0a 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -91,6 +91,7 @@ static void async_pf_execute(struct work_struct *work)
 
 	spin_lock(&vcpu->async_pf.lock);
 	list_add_tail(&apf->link, &vcpu->async_pf.done);
+	apf->vcpu = NULL;
 	spin_unlock(&vcpu->async_pf.lock);
 
 	/*
@@ -113,6 +114,8 @@ static void async_pf_execute(struct work_struct *work)
 
 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
 {
+	spin_lock(&vcpu->async_pf.lock);
+
 	/* cancel outstanding work queue item */
 	while (!list_empty(&vcpu->async_pf.queue)) {
 		struct kvm_async_pf *work =
@@ -120,6 +123,14 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
 					 typeof(*work), queue);
 		list_del(&work->queue);
 
+		/*
+		 * We know it's present in vcpu->async_pf.done, do
+		 * nothing here.
+		 */
+		if (!work->vcpu)
+			continue;
+
+		spin_unlock(&vcpu->async_pf.lock);
 #ifdef CONFIG_KVM_ASYNC_PF_SYNC
 		flush_work(&work->work);
 #else
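
The reworked loop follows a common pattern: hold the lock while walking the
queue, use a cleared back-pointer (work->vcpu) as an "already completed
elsewhere" marker, and drop the lock around the blocking flush or cancel so
it is never held across a sleep. A sketch of that shape with a pthread
mutex in place of the spinlock (list type hypothetical):

    #include <pthread.h>
    #include <stddef.h>

    struct work { struct work *next; void *owner; };

    static void drain(struct work **queue, pthread_mutex_t *lock)
    {
        pthread_mutex_lock(lock);
        while (*queue) {
            struct work *w = *queue;
            *queue = w->next;

            if (!w->owner) /* already completed elsewhere; skip */
                continue;

            pthread_mutex_unlock(lock); /* never block under the lock */
            /* ... flush or cancel w here ... */
            pthread_mutex_lock(lock);
        }
        pthread_mutex_unlock(lock);
    }

    int main(void)
    {
        pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        struct work *queue = NULL;

        drain(&queue, &lock);
        return 0;
    }
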
@@ -129,9 +140,9 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
 			kmem_cache_free(async_pf_cache, work);
 		}
 #endif
+		spin_lock(&vcpu->async_pf.lock);
 	}
 
-	spin_lock(&vcpu->async_pf.lock);
 	while (!list_empty(&vcpu->async_pf.done)) {
 		struct kvm_async_pf *work =
 			list_first_entry(&vcpu->async_pf.done,